diff --git a/baseline_tools/README.md b/baseline_tools/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3bcb3fd56006564095547b438cba41004e2f36e0
--- /dev/null
+++ b/baseline_tools/README.md
@@ -0,0 +1,48 @@
+## Functionality
+Generates a difference table for the software packages of a target branch, based on the baseline provided by a manifest.yaml file.
+
+## How it works
+Based on the baseline provided by yocto-meta-openeuler/.oebuild/manifest.yaml, the tool downloads each baseline repository into src_tmp (the same approach as openeuler fetch: only the baseline code is fetched, with depth 1), generates the key differences (spec file differences) between the baseline and the latest commit of the target branch, and stores them in {local name}.oe.specdiff. The specdiff files are then aggregated into the output table output.xlsx.
+
+## Usage
+1. Install the script dependencies
+   ```
+   pip install pyyaml pandas openpyxl
+   ```
+2. Check the black.repo blacklist. Repositories whose local name is listed there are excluded from the difference analysis (the current file covers the kernel and src-kernel repositories)
+   ```
+   # cat black.repo
+   allwinner-kernel
+   kernel-5.10
+   kernel-5.10-tag3093
+   rockchip-kernel
+   src-allwinner-kernel
+   src-kernel-5.10
+   src-kernel-5.10-tag3093
+   src-rockchip-kernel
+   ```
+3. Copy the baseline manifest.yaml to analyse (yocto-meta-openeuler/.oebuild/manifest.yaml) **into this tool's directory** (the directory containing oe_makediff.sh)
+   ```
+   cp xxx/yocto-meta-openeuler/.oebuild/manifest.yaml ./
+   ```
+4. Confirm the target branch names. Note the distinction between the common src-openeuler branch name and the special branch name used by ROS packages, and set the target_branch and target_branch_ros variables in oe_makediff.sh accordingly. For example:
+   ```
+   target_branch="openEuler-22.03-LTS-SP4"
+   target_branch_ros="Multi-Version_ros-humble_openEuler-22.03-LTS-SP2"
+   ```
+   There is one more variable, force_makediff, whose values behave as follows:
+   ```
+   force_makediff="true"  # for when the script has already run once (all repositories downloaded); speeds up incremental re-analysis.
+   force_makediff="false" # for resuming an interrupted download/analysis run; recommended as the default.
+   ```
+5. **From this tool's directory**, run oe_makediff.sh
+   ```
+   ./oe_makediff.sh
+   ```
+   **What does the script do?** It downloads the baseline repositories into src_tmp and emits the target differences. When analysing the target branch it first tries target_branch; if that branch does not exist it compares against target_branch_ros, and if target_branch_ros exists the repository is treated as a ROS package; otherwise the target branch is marked as missing.
+   **The script supports resuming after interruption.** While a repository is being downloaded and analysed it holds a filelock.{baselinecommit} lock file, which is released/deleted once the analysis completes. On a re-run, a repository whose directory exists without a lock file is skipped; a remaining lock file means the analysis did not finish, so the repository is deleted and analysed again.
+
+6. **From this tool's directory**, run the python3 script make_xls.py to aggregate the diff data into the output.xlsx table
+   ```
+   python3 make_xls.py
+   ```
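+
+   For reference, each {local name}.oe.specdiff file follows the layout sketched below (the branch, commit, and diff content here are illustrative placeholders, not real data); make_xls.py reads line 1 as the branch name, line 2 as the commit, and everything from line 3 on as the diff analysis:
+   ```
+   openEuler-22.03-LTS-SP4
+   1234567890abcdef1234567890abcdef12345678
+   diff --git a/example.spec b/example.spec
+   ...
+   ```
+   When the spec files are identical, line 3 contains "No differences" instead of a diff.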
diff --git a/baseline_tools/black.repo b/baseline_tools/black.repo
new file mode 100644
index 0000000000000000000000000000000000000000..ee82b4d49531d4ecddf27e8dbc652c100e22a54d
--- /dev/null
+++ b/baseline_tools/black.repo
@@ -0,0 +1,8 @@
+allwinner-kernel
+kernel-5.10
+kernel-5.10-tag3093
+rockchip-kernel
+src-allwinner-kernel
+src-kernel-5.10
+src-kernel-5.10-tag3093
+src-rockchip-kernel
diff --git a/baseline_tools/format_info.py b/baseline_tools/format_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..178747d96d67c614dc295cac95b82f40b69f7b36
--- /dev/null
+++ b/baseline_tools/format_info.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+# coding=utf-8
+import yaml
+
+# Load the YAML manifest
+with open('manifest.yaml', 'r') as file:
+    manifest_data = yaml.safe_load(file)
+
+# Print one "<local name>#<remote url>#<baseline commit>" line per repository
+for reponame, repo_info in manifest_data['manifest_list'].items():
+    remote_url = repo_info['remote_url']
+    version = repo_info['version']
+    print(f"{reponame}#{remote_url}#{version}")
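+
+# A minimal sketch of the manifest.yaml structure this script assumes
+# (names and values below are illustrative, not taken from a real manifest):
+#
+#   manifest_list:
+#     zlib:
+#       remote_url: https://gitee.com/src-openeuler/zlib.git
+#       version: 1234567890abcdef1234567890abcdef12345678
+#
+# which this script would print as:
+#   zlib#https://gitee.com/src-openeuler/zlib.git#1234567890abcdef1234567890abcdef12345678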
== "0" ];then + echo "$target_branch_ros" > ${reponame}.oe.specdiff + lastcommit=$(git rev-parse upstream/$target_branch_ros) + echo $lastcommit >> ${reponame}.oe.specdiff + git diff $basecommit upstream/$target_branch_ros *spec >> ${reponame}.oe.specdiff + if [ $(wc -l < ${reponame}.oe.specdiff) -eq 2 ]; then + echo "无差异" >> ${reponame}.oe.specdiff + fi + else + echo "目标分支不存在" > ${reponame}.oe.specdiff + echo "目标commit未知" >> ${reponame}.oe.specdiff + echo "差异未知" >> ${reponame}.oe.specdiff + fi + fi + rm filelock.$basecommit + cd - + +done + diff --git a/build_tools/README b/build_tools/README new file mode 100644 index 0000000000000000000000000000000000000000..5ae709c4dc78c0651b1490c3c8150dd9fd395e96 --- /dev/null +++ b/build_tools/README @@ -0,0 +1,2 @@ +yocto-embedded-tools/build_tools: Maintain stable and unchangeable tools + diff --git a/build_tools/gcc-tools/README.md b/build_tools/gcc-tools/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a13db58076586d12f762179853e3596c504b77ed --- /dev/null +++ b/build_tools/gcc-tools/README.md @@ -0,0 +1,57 @@ +## gcc-tools + +### 介绍 + +该python脚本用于gcc全自动化构建任务,任务流程如下: + +1. 构建选定的gcc编译链包,可选的gcc编译链有openeuler_gcc_arm64le.tar.gz、openeuler_gcc_arm32le.tar.gz、openeuler_gcc_x86_64.tar.gz、openeuler_gcc_riscv64.tar.gz。 + +2. 上传已构建好的gcc tar包到远程服务器。 + +### 使用教程: + +1. 准备交叉编译器构建所需的CI容器镜像并进入 + +``` +sudo docker pull swr.cn-north-4.myhuaweicloud.com/openeuler-embedded/openeuler-ci-gcc:latest +sudo docker run -itd --network host swr.cn-north-4.myhuaweicloud.com/openeuler-embedded/openeuler-ci-gcc:latest bash +``` + +2. 安装gcc-tools脚本所需的库 + +``` +pip install six +``` + +3. 将本仓库代码下载到/usr1目录,并更改本仓库目录的所属用户组,然后进入到本仓库目录执行main.py脚本 + +``` +sudo git clone --depth=1 https://gitee.com/openeuler/yocto-embedded-tools.git -v /usr1/yocto-embedded-tools +sudo chown -R jenkins:jenkins /usr1/yocto-embedded-tools +cd /usr1/yocto-embedded-tools +python3 build_tools/gcc-tools/main.py \ + -u remoteName \ + -p remotePasswd \ + -skey remotePkey \ + -ip remoteIp \ + -dst remoteDstDir \ + -archs "aarch64 arm32" +``` + +### 参数解析: + +-u:远程服务器登录用户名 + +-p:远程服务器登录密码 + +-skey:远程服务器登陆密钥,如果同时传入-p和-skey,则选用-skey参数 + +-ip:远程服务器IP地址 + +-dst:gcc tar包在远程服务器存放地址 + +-archs:选定的需要编译的gcc版本 + + + +执行完成看到`all task finishd successful`输出则表示所有构建全部完成,登录远程主机,查看remoteDesDir目录,该目录下会出现gcc目录,gcc目录下存放着gcc最新编译链。 \ No newline at end of file diff --git a/build_tools/gcc-tools/main.py b/build_tools/gcc-tools/main.py new file mode 100644 index 0000000000000000000000000000000000000000..854501171dc05ab2c2dcfbb017784c2a9bb19cb3 --- /dev/null +++ b/build_tools/gcc-tools/main.py @@ -0,0 +1,305 @@ +import argparse +import subprocess +import os +import logging +import getpass +from paramiko import SSHClient, AutoAddPolicy, RSAKey, SSHException + +logging.basicConfig(level = logging.INFO, format = '%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger() + +# set global param for other function to use +USER = "" +PASSWD = "" +SKEY = "" +IP = "" +BASEDST = "" +WHO = "" +XTOOLSDIR = "" +CROSSDIR = "" +ARCHLIST = [] + + +def init_args(): + """ + init args + :return: parse_args + """ + parser = argparse.ArgumentParser() + parser.add_argument("-u", type=str, dest="user", default="", required=False) + parser.add_argument("-p", type=str, dest="passwd", default="", required=False) + parser.add_argument("-skey", type=str, dest="skey", default="", required=False) + parser.add_argument("-ip", type=str, dest="ip", default="", required=False) + parser.add_argument("-dst", type=str, dest="dst", 
default="", required=False) + parser.add_argument("-archs", type=str, dest="archs", default="", required=False) + parser.add_argument("-test", type=bool, dest="is_test", default=False, required=False) + + return parser.parse_args() + + +def main(): + # init param + args = init_args() + + global USER, PASSWD, IP, SKEY, BASEDST, WHO, XTOOLSDIR, CROSSDIR + USER = args.user + PASSWD = args.passwd + SKEY = args.skey + IP = args.ip + BASEDST = args.dst + WHO = getpass.getuser() + XTOOLSDIR = os.path.join("/home", WHO, "x-tools") + CROSSDIR = os.path.join(os.getcwd(), "cross_tools") + get_arch_list(args.archs) + + # check ssh param if currented + if not check_param(): + logger.error("param check is not pass") + return + + # running gcc compile brfore initing environment with this function + if not prepare(cwd = CROSSDIR): + return + + # running all gcc compile tasks + if not compile_gcc_all(): + return + + command = "ls -al" + output = subprocess.run( + command, + shell = True, + encoding = "utf-8", + cwd = XTOOLSDIR) + if output.returncode == 0: + logger.info(output.stdout) + + # tar gcc to tar.gz + if not tar_and_upload_all(): + return + + logger.info("all task finishd successful") + + +def check_param(): + global USER, PASSWD, SKEY, IP, ARCHLIST + if USER == "": + logger.error("-u can not empty") + return False + + if PASSWD == "" and SKEY == "": + logger.error("-p and -skey can not empty together") + return False + + if IP == "": + logger.error("-ip can not empty") + return False + + if len(ARCHLIST) == 0: + logger.error("-archs can not empty, you must select from aarch64, arm32, x86_64 or riscv64 one or more") + return False + + if not check_ssh(): + return False + + return True + + +def get_arch_list(archs : str): + global ARCHLIST + split = archs.split(" ") + for arch in split: + ARCHLIST.append(arch) + + +def check_ssh(): + ''' + check ssh param if current + ''' + + sshCli, sshSftp = get_ssh_client() + if sshCli == None: + return False + + sshSftp.close() + sshCli.close() + + return True + + +def get_ssh_client(): + + global USER, PASSWD, SKEY, IP + + sshCli = SSHClient() + + sshCli.set_missing_host_key_policy(AutoAddPolicy) + + try: + if SKEY == "": + sshCli.connect(hostname = IP, username = USER, password = PASSWD) + else: + pri_key = RSAKey.from_private_key_file(SKEY) + sshCli.connect(hostname = IP, username = USER, pkey=pri_key) + except SSHException: + logger.error("ssh init faild") + return None,None + + return sshCli, sshCli.open_sftp() + + +def prepare(cwd): + ''' + running .prepare.sh + ''' + logger.info("====================now running prepare==========================================") + command = "./prepare.sh" + with subprocess.Popen( + command, + shell = True, + encoding = "utf-8", + stdout = subprocess.PIPE, + cwd = cwd) as proc: + + stdout = proc.stdout + for readline in stdout: + if readline.find("'cp config_riscv64 .config && ct-ng build' for build riscv64") != -1: + break + logger.info(readline) + stdout.close() + + logger.info("====================prepare successful==========================================") + + return True + + +def compile_gcc_all(): + global ARCHLIST, CROSSDIR + + if "aarch64" in ARCHLIST: + if not compile_gcc("aarch64", CROSSDIR): + return False + + if "arm32" in ARCHLIST: + if not compile_gcc("arm32", CROSSDIR): + return False + + if "x86_64" in ARCHLIST: + if not compile_gcc("x86_64", CROSSDIR): + return False + + if "riscv64" in ARCHLIST: + if not compile_gcc("riscv64", CROSSDIR): + return False + + return True + + +def compile_gcc(arch, cwd): + 
logger.info("====================now building gcc-{}====================================".format(arch)) + command = "cp config_{} .config && ct-ng build".format(arch) + with subprocess.Popen( + command, + shell = True, + encoding = "utf-8", + stdout = subprocess.PIPE, + cwd = cwd) as proc: + + stdout = proc.stdout + for readline in stdout: + logger.info(readline) + if readline.find("Finishing installation") != -1: + break + stdout.close() + + logger.info("====================build gcc-{} successful====================================".format(arch)) + + return True + + +def tar_and_upload_all(): + + global ARCHLIST, XTOOLSDIR + + if "aarch64" in ARCHLIST: + if not tar_and_upload("aarch64-openeuler-linux-gnu", "openeuler_gcc_arm64le", XTOOLSDIR): + return False + + if "arm32" in ARCHLIST: + if not tar_and_upload("arm-openeuler-linux-gnueabi", "openeuler_gcc_arm32le", XTOOLSDIR): + return False + + if "x86_64" in ARCHLIST: + if not tar_and_upload("x86_64-openeuler-linux-gnu", "openeuler_gcc_x86_64", XTOOLSDIR): + return False + + if "riscv64" in ARCHLIST: + if not tar_and_upload("riscv64-openeuler-linux-gnu", "openeuler_gcc_riscv64", XTOOLSDIR): + return False + + return True + + +def tar_and_upload(origin, target, cwd): + # mv origin directory to target directory as we want for tar step + command = "mv {} {} && tar zcf {}.tar.gz {}".format(origin, target, target, target) + output = subprocess.run( + command, + shell = True, + encoding = "utf-8", + cwd = cwd) + if output.returncode != 0: + logger.info("{} tar step faield".format(target)) + logger.error(output.stderr) + return False + logger.info("finished {} tar step".format(target)) + + # upload action + global BASEDST + source = os.path.join(cwd, target + ".tar.gz") + dst = os.path.join(BASEDST, "gcc") + if not upload(source = source, dst = dst): + logger.error("upload {}.tar.gz faield".format(target)) + return False + + logger.info("upload {}.tar.gz successful".format(target)) + return True + + +def upload(source, dst): + ''' + upload to remote server + ''' + global BASEDST + + filename = os.path.basename(source) + dst_file = os.path.join(dst, filename) + + sshCli, sftpCli = get_ssh_client() + if sshCli == None: + logger.error("ssh connect faild in upload function") + return False + + try: + listfile = sftpCli.listdir(dst_file) + if len(listfile) > 0: + # delete dst directory + sshCli.exec_command("rm -rf {}".format(listfile)) + sshCli.exec_command("mkdir -p {}".format(dst)) + logger.info("mkdir {} action successful".format(dst)) + except FileNotFoundError: + sshCli.exec_command("mkdir -p {}".format(dst)) + logger.info("mkdir {} action successful".format(dst)) + + # upload local resource to remote server + sftpCli.put(source, dst_file) + logger.info("put {} action successful".format(dst)) + + sftpCli.close() + sshCli.close() + + return True + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/cross_tools/README.md b/cross_tools/README.md new file mode 100644 index 0000000000000000000000000000000000000000..206e8a35ae0ec33c2e9eab2fb50f5a7ea786c68a --- /dev/null +++ b/cross_tools/README.md @@ -0,0 +1,72 @@ +# cross_tools + +#### 介绍 + +该模块用于制作openEuler嵌入式的交叉编译器 + +#### 软件架构和配置说明 + +configs: 依赖工具及其crosstool-ng的各架构构建配置 + +prepare.sh: 用于下载构建所需的依赖仓库,并按照下载的路径,刷新config + +对于64位编译器,脚本中(update_feature)通过修改GCC源码,默认从lib64目录下寻找链接器,并在libstdc++.so中添加默认安全选项(relro、now、noexecstack) + +可通过ct-ng show-config查看配置基础情况(例如cp config_aarch64 .config && ct-ng show-config) + +最终配置可参见输出件*gcc -v + +例(arm64): + +```` 
+
+The basic configuration can be inspected with ct-ng show-config (for example: cp config_aarch64 .config && ct-ng show-config)
+
+The final configuration can be read from the output artifact via *gcc -v
+
+Example (arm64):
+
+````
+COLLECT_GCC=/home/openeuler/x-tools/aarch64-openeuler-linux-gnu/bin/aarch64-openeuler-linux-gnu-gcc
+COLLECT_LTO_WRAPPER=/home/openeuler/x-tools/aarch64-openeuler-linux-gnu/libexec/gcc/aarch64-openeuler-linux-gnu/10.3.1/lto-wrapper
+Target: aarch64-openeuler-linux-gnu
+Configured with: /usr1/cross-ng_openeuler/.build/aarch64-openeuler-linux-gnu/src/gcc/configure --build=x86_64-build_pc-linux-gnu --host=x86_64-build_pc-linux-gnu --target=aarch64-openeuler-linux-gnu --prefix=/home/openeuler/x-tools/aarch64-openeuler-linux-gnu --exec_prefix=/home/openeuler/x-tools/aarch64-openeuler-linux-gnu --with-sysroot=/home/openeuler/x-tools/aarch64-openeuler-linux-gnu/aarch64-openeuler-linux-gnu/sysroot --enable-languages=c,c++,fortran --with-pkgversion='crosstool-NG 1.25.0' --enable-__cxa_atexit --disable-libmudflap --enable-libgomp --disable-libssp --disable-libquadmath --disable-libquadmath-support --disable-libsanitizer --disable-libmpx --disable-libstdcxx-verbose --with-gmp=/usr1/cross-ng_openeuler/.build/aarch64-openeuler-linux-gnu/buildtools --with-mpfr=/usr1/cross-ng_openeuler/.build/aarch64-openeuler-linux-gnu/buildtools --with-mpc=/usr1/cross-ng_openeuler/.build/aarch64-openeuler-linux-gnu/buildtools --with-isl=/usr1/cross-ng_openeuler/.build/aarch64-openeuler-linux-gnu/buildtools --enable-lto --enable-threads=posix --enable-target-optspace --enable-plugin --enable-gold --disable-nls --enable-multiarch --with-multilib-list=lp64 --with-local-prefix=/home/openeuler/x-tools/aarch64-openeuler-linux-gnu/aarch64-openeuler-linux-gnu/sysroot --enable-long-long --with-arch=armv8-a --with-gnu-as --with-gnu-ld --enable-c99 --enable-shared --enable-poison-system-directories --enable-symvers=gnu --disable-bootstrap --disable-libstdcxx-dual-abi --enable-default-pie --libdir=/home/openeuler/x-tools/aarch64-openeuler-linux-gnu/lib64 --with-build-time-tools=/home/openeuler/x-tools/aarch64-openeuler-linux-gnu/aarch64-openeuler-linux-gnu/bin
+Thread model: posix
+Supported LTO compression algorithms: zlib
+gcc version 10.3.1 (crosstool-NG 1.25.0)
+````
+
+#### Tutorial
+
+1. Pull the container image required for building the cross compiler, and enter it (the commands below are only an example; for the branch the image actually uses, follow the openEuler Embedded online documentation)
+
+````
+sudo docker pull swr.cn-north-4.myhuaweicloud.com/openeuler-embedded/openeuler-container
+sudo docker run -idt --network host swr.cn-north-4.myhuaweicloud.com/openeuler-embedded/openeuler-container bash
+````
+
+2. Clone this repository, and prepare the code and configuration needed for the build with a single script:
+
+````
+cd /usr1 && git clone -b master https://gitee.com/openeuler/yocto-embedded-tools.git
+cd yocto-embedded-tools/cross_tools
+./prepare.sh
+````
+
+3. As a regular user, build with the ct-ng tool (already integrated in this build container).
+
+````
+chown -R openeuler:users /usr1
+su openeuler
+#aarch64:
+cp config_aarch64 .config && ct-ng build
+#arm32
+cp config_arm32 .config && ct-ng build
+#x86_64
+cp config_x86_64 .config && ct-ng build
+#riscv64
+cp config_riscv64 .config && ct-ng build
+````
+
+4. Once the build completes, the intermediate build artifacts are in ./.build under the corresponding working directory, and the output artifacts are under /home/openeuler/x-tools/
+
+Taking arm64 as an example, rename the directory and pack it, and it is ready to use. The content under /home/openeuler/x-tools/aarch64-openeuler-linux-gnu is identical to the content under /usr1/openeuler/gcc/openeuler_gcc_arm64le in the yocto build container
+
+````
+cd /home/openeuler/x-tools/
+mv aarch64-openeuler-linux-gnu openeuler_gcc_arm64le
+tar czf openeuler_gcc_arm64le.tar.gz openeuler_gcc_arm64le
+````
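+
+To sanity-check the packaged toolchain (arm64 example from above; the renamed compiler keeps its original tuple-prefixed name), it should still report the full configuration shown earlier:
+
+````
+cd /home/openeuler/x-tools/openeuler_gcc_arm64le
+./bin/aarch64-openeuler-linux-gnu-gcc -v
+````
+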
diff --git a/cross_tools/configs/config.xml b/cross_tools/configs/config.xml
new file mode 100644
index 0000000000000000000000000000000000000000..8736398ab111f9cbe7d35c599bd2678a04a4e450
--- /dev/null
+++ b/cross_tools/configs/config.xml
@@ -0,0 +1,33 @@
+COMPILER_INFO="gcc 12.3.0"
+KERNEL="kernel"
+KERNEL_BRANCH="5.10.0-136.20.0"
+MUSLC_BRANCH="master"
+COMMON_BRANCH="openEuler-23.09"
+GCC="gcc"
+GCC_DIR="gcc-12.3.0"
+BINUTILS="binutils"
+BINUTILS_DIR="binutils-2.40"
+MPFR="mpfr"
+MPFR_DIR="mpfr-4.2.0"
+GMP="gmp"
+GMP_DIR="gmp-6.3.0"
+MPC="libmpc"
+MPC_DIR="mpc-1.3.1"
+ISL="isl"
+ISL_DIR="isl-0.24"
+GLIBC="glibc"
+GLIBC_DIR="glibc-2.38"
+MUSLC="musl"
+MUSLC_DIR="musl-1.2.4"
+EXPAT="expat"
+EXPAT_DIR="expat-2.5.0"
+GETTEXT="gettext"
+GETTEXT_DIR="gettext-0.22"
+LIBICONV="libiconv"
+LIBICONV_DIR="libiconv-1.16"
+NCURSES="ncurses"
+NCURSES_DIR="ncurses-6.4"
+ZLIB="zlib"
+ZLIB_DIR="zlib-1.2.13"
+GDB="gdb"
+GDB_DIR="gdb-12.1"
diff --git a/cross_tools/configs/config_aarch64 b/cross_tools/configs/config_aarch64
new file mode 100644
index 0000000000000000000000000000000000000000..0a296c2d1a6cbe8047deeeb806c496b8f16ee7f5
--- /dev/null
+++ b/cross_tools/configs/config_aarch64
@@ -0,0 +1,1026 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# crosstool-NG 1.25.0 Configuration
+#
+CT_CONFIGURE_has_static_link=y
+CT_CONFIGURE_has_cxx11=y
+CT_CONFIGURE_has_curl=y
+CT_CONFIGURE_has_rsync=y
+CT_CONFIGURE_has_make_3_81_or_newer=y
+CT_CONFIGURE_has_make_4_0_or_newer=y
+CT_CONFIGURE_has_libtool_2_4_or_newer=y
+CT_CONFIGURE_has_libtoolize_2_4_or_newer=y
+CT_CONFIGURE_has_autoconf_2_65_or_newer=y
+CT_CONFIGURE_has_autoreconf_2_65_or_newer=y
+CT_CONFIGURE_has_automake_1_15_or_newer=y
+CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y
+CT_CONFIGURE_has_python_3_4_or_newer=y
+CT_CONFIGURE_has_bison_2_7_or_newer=y
+CT_CONFIGURE_has_python=y
+CT_CONFIGURE_has_git=y
+CT_CONFIGURE_has_md5sum=y
+CT_CONFIGURE_has_sha1sum=y
+CT_CONFIGURE_has_sha256sum=y
+CT_CONFIGURE_has_sha512sum=y
+CT_CONFIGURE_has_install_with_strip_program=y
+CT_VERSION="1.25.0"
+CT_VCHECK=""
+CT_CONFIG_VERSION_ENV="4"
+CT_CONFIG_VERSION_CURRENT="4"
+CT_CONFIG_VERSION="4"
+CT_MODULES=y
+
+#
+# Paths and misc options
+#
+
+#
+# crosstool-NG behavior
+#
+# CT_OBSOLETE is not set
+CT_EXPERIMENTAL=y
+# CT_ALLOW_BUILD_AS_ROOT is not set
+# CT_DEBUG_CT is not set
+
+#
+# Paths
+#
+CT_LOCAL_TARBALLS_DIR="${HOME}/src"
+CT_SAVE_TARBALLS=y
+# CT_TARBALLS_BUILDROOT_LAYOUT is not set
+CT_WORK_DIR="${CT_TOP_DIR}/.build"
+CT_BUILD_TOP_DIR="${CT_WORK_DIR:-${CT_TOP_DIR}/.build}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}"
+CT_BUILD_DIR="${CT_BUILD_TOP_DIR}/build"
+CT_PREFIX_DIR="${CT_PREFIX:-${HOME}/x-tools}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}"
+CT_RM_RF_PREFIX_DIR=y
+CT_REMOVE_DOCS=y
+CT_INSTALL_LICENSES=y
+CT_PREFIX_DIR_RO=y
+CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y
+# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set
+
+#
+# Downloading
+#
+CT_DOWNLOAD_AGENT_CURL=y
+# CT_DOWNLOAD_AGENT_NONE is not set
+# CT_FORBID_DOWNLOAD is not set
+# CT_FORCE_DOWNLOAD is not set
+CT_CONNECT_TIMEOUT=10
+CT_DOWNLOAD_CURL_OPTIONS="--location --ftp-pasv --retry 3 --fail --silent"
+# CT_ONLY_DOWNLOAD is not set
+#
CT_USE_MIRROR is not set +CT_VERIFY_DOWNLOAD_DIGEST=y +CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y +# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set +CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512" +# CT_VERIFY_DOWNLOAD_SIGNATURE is not set + +# +# Extracting +# +# CT_FORCE_EXTRACT is not set +CT_OVERRIDE_CONFIG_GUESS_SUB=y +# CT_ONLY_EXTRACT is not set +CT_PATCH_BUNDLED=y +# CT_PATCH_LOCAL is not set +# CT_PATCH_BUNDLED_LOCAL is not set +# CT_PATCH_LOCAL_BUNDLED is not set +# CT_PATCH_NONE is not set +CT_PATCH_ORDER="bundled" + +# +# Build behavior +# +CT_PARALLEL_JOBS=0 +CT_LOAD="" +CT_USE_PIPES=y +CT_EXTRA_CFLAGS_FOR_BUILD="" +CT_EXTRA_CXXFLAGS_FOR_BUILD="" +CT_EXTRA_LDFLAGS_FOR_BUILD="" +CT_EXTRA_CFLAGS_FOR_HOST="" +CT_EXTRA_LDFLAGS_FOR_HOST="" +# CT_CONFIG_SHELL_SH is not set +# CT_CONFIG_SHELL_ASH is not set +CT_CONFIG_SHELL_BASH=y +# CT_CONFIG_SHELL_CUSTOM is not set +CT_CONFIG_SHELL="${bash}" + +# +# Logging +# +# CT_LOG_ERROR is not set +# CT_LOG_WARN is not set +# CT_LOG_INFO is not set +CT_LOG_EXTRA=y +# CT_LOG_ALL is not set +# CT_LOG_DEBUG is not set +CT_LOG_LEVEL_MAX="EXTRA" +# CT_LOG_SEE_TOOLS_WARN is not set +CT_LOG_PROGRESS_BAR=y +CT_LOG_TO_FILE=y +CT_LOG_FILE_COMPRESS=y +# end of Paths and misc options + +# +# Target options +# +# CT_ARCH_ALPHA is not set +# CT_ARCH_ARC is not set +CT_ARCH_ARM=y +# CT_ARCH_AVR is not set +# CT_ARCH_C6X is not set +# CT_ARCH_M68K is not set +# CT_ARCH_MICROBLAZE is not set +# CT_ARCH_MIPS is not set +# CT_ARCH_MOXIE is not set +# CT_ARCH_MSP430 is not set +# CT_ARCH_NIOS2 is not set +# CT_ARCH_POWERPC is not set +# CT_ARCH_PRU is not set +# CT_ARCH_RISCV is not set +# CT_ARCH_S390 is not set +# CT_ARCH_SH is not set +# CT_ARCH_SPARC is not set +# CT_ARCH_X86 is not set +# CT_ARCH_XTENSA is not set +CT_ARCH="arm" +CT_ARCH_CHOICE_KSYM="ARM" +CT_ARCH_CPU="" +CT_ARCH_TUNE="" +CT_ARCH_ARM_SHOW=y + +# +# Options for arm +# +CT_ARCH_ARM_PKG_KSYM="" +CT_ALL_ARCH_CHOICES="ALPHA ARC ARM AVR C6X M68K MICROBLAZE MIPS MOXIE MSP430 NIOS2 POWERPC PRU RISCV S390 SH SPARC X86 XTENSA" +CT_ARCH_SUFFIX="" +# CT_OMIT_TARGET_VENDOR is not set + +# +# Generic target options +# +CT_MULTILIB=y +# CT_DEMULTILIB is not set +CT_ARCH_SUPPORTS_BOTH_MMU=y +CT_ARCH_DEFAULT_HAS_MMU=y +CT_ARCH_USE_MMU=y +CT_ARCH_SUPPORTS_FLAT_FORMAT=y +CT_ARCH_SUPPORTS_EITHER_ENDIAN=y +CT_ARCH_DEFAULT_LE=y +# CT_ARCH_BE is not set +CT_ARCH_LE=y +CT_ARCH_ENDIAN="little" +CT_ARCH_SUPPORTS_32=y +CT_ARCH_SUPPORTS_64=y +CT_ARCH_DEFAULT_32=y +CT_ARCH_BITNESS=64 +# CT_ARCH_32 is not set +CT_ARCH_64=y + +# +# Target optimisations +# +CT_ARCH_SUPPORTS_WITH_ARCH=y +CT_ARCH_SUPPORTS_WITH_CPU=y +CT_ARCH_SUPPORTS_WITH_TUNE=y +CT_ARCH_EXCLUSIVE_WITH_CPU=y +CT_ARCH_ARCH="" +CT_TARGET_CFLAGS="" +CT_TARGET_LDFLAGS="" +# end of Target options + +# +# Toolchain options +# + +# +# General toolchain options +# +CT_FORCE_SYSROOT=y +CT_USE_SYSROOT=y +CT_SYSROOT_NAME="sysroot" +CT_SYSROOT_DIR_PREFIX="" +CT_WANTS_STATIC_LINK=y +CT_WANTS_STATIC_LINK_CXX=y +# CT_STATIC_TOOLCHAIN is not set +CT_SHOW_CT_VERSION=y +CT_TOOLCHAIN_PKGVERSION="" +CT_TOOLCHAIN_BUGURL="" + +# +# Tuple completion and aliasing +# +CT_TARGET_VENDOR="openeuler" +CT_TARGET_ALIAS_SED_EXPR="" +CT_TARGET_ALIAS="" + +# +# Toolchain type +# +# CT_NATIVE is not set +CT_CROSS=y +# CT_CROSS_NATIVE is not set +# CT_CANADIAN is not set +CT_TOOLCHAIN_TYPE="cross" + +# +# Build system +# +CT_BUILD="" +CT_BUILD_PREFIX="" +CT_BUILD_SUFFIX="" + +# +# Misc options +# +# CT_TOOLCHAIN_ENABLE_NLS 
is not set +# end of Toolchain options + +# +# Operating System +# +CT_KERNEL_SUPPORTS_SHARED_LIBS=y +# CT_KERNEL_BARE_METAL is not set +CT_KERNEL_LINUX=y +CT_KERNEL="linux" +CT_KERNEL_CHOICE_KSYM="LINUX" +CT_KERNEL_LINUX_SHOW=y + +# +# Options for linux +# +CT_KERNEL_LINUX_PKG_KSYM="LINUX" +CT_LINUX_DIR_NAME="linux" +CT_LINUX_USE_WWW_KERNEL_ORG=y +# CT_LINUX_USE_ORACLE is not set +CT_LINUX_USE="LINUX" +CT_LINUX_PKG_NAME="linux" +# CT_LINUX_SRC_RELEASE is not set +# CT_LINUX_SRC_DEVEL is not set +CT_LINUX_SRC_CUSTOM=y +CT_LINUX_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/kernel" +CT_LINUX_PATCH_GLOBAL=y +# CT_LINUX_PATCH_BUNDLED is not set +# CT_LINUX_PATCH_LOCAL is not set +# CT_LINUX_PATCH_BUNDLED_LOCAL is not set +# CT_LINUX_PATCH_LOCAL_BUNDLED is not set +# CT_LINUX_PATCH_NONE is not set +CT_LINUX_PATCH_ORDER="global" +# CT_LINUX_VERY_NEW is not set +# CT_LINUX_V_5_16 is not set +# CT_LINUX_V_5_15 is not set +# CT_LINUX_V_5_14 is not set +# CT_LINUX_V_5_13 is not set +# CT_LINUX_V_5_12 is not set +# CT_LINUX_V_5_11 is not set +CT_LINUX_V_5_10=y +# CT_LINUX_V_5_9 is not set +# CT_LINUX_V_5_8 is not set +# CT_LINUX_V_5_7 is not set +# CT_LINUX_V_5_4 is not set +# CT_LINUX_V_5_3 is not set +# CT_LINUX_V_5_2 is not set +# CT_LINUX_V_5_1 is not set +# CT_LINUX_V_5_0 is not set +# CT_LINUX_V_4_20 is not set +# CT_LINUX_V_4_19 is not set +# CT_LINUX_V_4_18 is not set +# CT_LINUX_V_4_17 is not set +# CT_LINUX_V_4_16 is not set +# CT_LINUX_V_4_15 is not set +# CT_LINUX_V_4_14 is not set +# CT_LINUX_V_4_13 is not set +# CT_LINUX_V_4_12 is not set +# CT_LINUX_V_4_11 is not set +# CT_LINUX_V_4_10 is not set +# CT_LINUX_V_4_9 is not set +# CT_LINUX_V_4_4 is not set +# CT_LINUX_V_4_1 is not set +# CT_LINUX_V_3_16 is not set +# CT_LINUX_V_3_13 is not set +# CT_LINUX_V_3_12 is not set +# CT_LINUX_V_3_10 is not set +CT_LINUX_VERSION="5.10.100" +CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})" +CT_LINUX_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign" +CT_LINUX_5_12_or_older=y +CT_LINUX_older_than_5_12=y +CT_LINUX_later_than_5_3=y +CT_LINUX_5_3_or_later=y +CT_LINUX_later_than_4_8=y +CT_LINUX_4_8_or_later=y +CT_LINUX_later_than_3_7=y +CT_LINUX_3_7_or_later=y +CT_LINUX_REQUIRE_3_7_or_later=y +CT_LINUX_later_than_3_2=y +CT_LINUX_3_2_or_later=y +CT_LINUX_REQUIRE_3_2_or_later=y +CT_KERNEL_DEP_RSYNC=y +CT_KERNEL_LINUX_VERBOSITY_0=y +# CT_KERNEL_LINUX_VERBOSITY_1 is not set +# CT_KERNEL_LINUX_VERBOSITY_2 is not set +CT_KERNEL_LINUX_VERBOSE_LEVEL=0 +CT_KERNEL_LINUX_INSTALL_CHECK=y +CT_ALL_KERNEL_CHOICES="BARE_METAL LINUX WINDOWS" + +# +# Common kernel options +# +CT_SHARED_LIBS=y +# end of Operating System + +# +# Binary utilities +# +CT_ARCH_BINFMT_ELF=y +CT_BINUTILS_BINUTILS=y +CT_BINUTILS="binutils" +CT_BINUTILS_CHOICE_KSYM="BINUTILS" +CT_BINUTILS_BINUTILS_SHOW=y + +# +# Options for binutils +# +CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS" +CT_BINUTILS_DIR_NAME="binutils" +CT_BINUTILS_USE_GNU=y +# CT_BINUTILS_USE_LINARO is not set +# CT_BINUTILS_USE_ORACLE is not set +CT_BINUTILS_USE="BINUTILS" +CT_BINUTILS_PKG_NAME="binutils" +# CT_BINUTILS_SRC_RELEASE is not set +# CT_BINUTILS_SRC_DEVEL is not set +CT_BINUTILS_SRC_CUSTOM=y +CT_BINUTILS_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/binutils/binutils-2.37" +CT_BINUTILS_PATCH_GLOBAL=y +# CT_BINUTILS_PATCH_BUNDLED is not set +# CT_BINUTILS_PATCH_LOCAL is not set +# 
CT_BINUTILS_PATCH_BUNDLED_LOCAL is not set +# CT_BINUTILS_PATCH_LOCAL_BUNDLED is not set +# CT_BINUTILS_PATCH_NONE is not set +CT_BINUTILS_PATCH_ORDER="global" +# CT_BINUTILS_VERY_NEW is not set +# CT_BINUTILS_V_2_38 is not set +CT_BINUTILS_V_2_37=y +# CT_BINUTILS_V_2_36 is not set +# CT_BINUTILS_V_2_35 is not set +# CT_BINUTILS_V_2_34 is not set +# CT_BINUTILS_V_2_33 is not set +# CT_BINUTILS_V_2_32 is not set +# CT_BINUTILS_V_2_31 is not set +# CT_BINUTILS_V_2_30 is not set +# CT_BINUTILS_V_2_29 is not set +# CT_BINUTILS_V_2_28 is not set +# CT_BINUTILS_V_2_27 is not set +# CT_BINUTILS_V_2_26 is not set +CT_BINUTILS_VERSION="2.37" +CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)" +CT_BINUTILS_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig" +CT_BINUTILS_later_than_2_30=y +CT_BINUTILS_2_30_or_later=y +CT_BINUTILS_later_than_2_27=y +CT_BINUTILS_2_27_or_later=y +CT_BINUTILS_later_than_2_26=y +CT_BINUTILS_2_26_or_later=y + +# +# GNU binutils +# +CT_BINUTILS_GOLD_SUPPORTS_ARCH=y +CT_BINUTILS_GOLD_SUPPORT=y +CT_BINUTILS_FORCE_LD_BFD_DEFAULT=y +# CT_BINUTILS_LINKER_LD is not set +CT_BINUTILS_LINKER_LD_GOLD=y +CT_BINUTILS_GOLD_INSTALLED=y +CT_BINUTILS_GOLD_THREADS=y +CT_BINUTILS_LINKER_BOTH=y +CT_BINUTILS_LINKERS_LIST="ld,gold" +CT_BINUTILS_LD_WRAPPER=y +CT_BINUTILS_LINKER_DEFAULT="bfd" +CT_BINUTILS_PLUGINS=y +CT_BINUTILS_RELRO=y +CT_BINUTILS_DETERMINISTIC_ARCHIVES=y +CT_BINUTILS_EXTRA_CONFIG_ARRAY="" +# CT_BINUTILS_FOR_TARGET is not set +CT_ALL_BINUTILS_CHOICES="BINUTILS" +# end of Binary utilities + +# +# C-library +# +CT_LIBC_GLIBC=y +# CT_LIBC_MUSL is not set +# CT_LIBC_UCLIBC_NG is not set +CT_LIBC="glibc" +CT_LIBC_CHOICE_KSYM="GLIBC" +CT_THREADS="nptl" +CT_LIBC_GLIBC_SHOW=y + +# +# Options for glibc +# +CT_LIBC_GLIBC_PKG_KSYM="GLIBC" +CT_GLIBC_DIR_NAME="glibc" +CT_GLIBC_USE_GNU=y +# CT_GLIBC_USE_ORACLE is not set +CT_GLIBC_USE="GLIBC" +CT_GLIBC_PKG_NAME="glibc" +# CT_GLIBC_SRC_RELEASE is not set +# CT_GLIBC_SRC_DEVEL is not set +CT_GLIBC_SRC_CUSTOM=y +CT_GLIBC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/glibc/glibc-2.36" +CT_GLIBC_PATCH_GLOBAL=y +# CT_GLIBC_PATCH_BUNDLED is not set +# CT_GLIBC_PATCH_LOCAL is not set +# CT_GLIBC_PATCH_BUNDLED_LOCAL is not set +# CT_GLIBC_PATCH_LOCAL_BUNDLED is not set +# CT_GLIBC_PATCH_NONE is not set +CT_GLIBC_PATCH_ORDER="global" +# CT_GLIBC_VERY_NEW is not set +CT_GLIBC_V_2_35=y +# CT_GLIBC_V_2_34 is not set +# CT_GLIBC_V_2_33 is not set +# CT_GLIBC_V_2_32 is not set +# CT_GLIBC_V_2_31 is not set +# CT_GLIBC_V_2_30 is not set +# CT_GLIBC_V_2_29 is not set +# CT_GLIBC_V_2_28 is not set +# CT_GLIBC_V_2_27 is not set +# CT_GLIBC_V_2_26 is not set +# CT_GLIBC_V_2_25 is not set +# CT_GLIBC_V_2_24 is not set +# CT_GLIBC_V_2_23 is not set +# CT_GLIBC_V_2_19 is not set +# CT_GLIBC_V_2_17 is not set +CT_GLIBC_VERSION="2.35" +CT_GLIBC_MIRRORS="$(CT_Mirrors GNU glibc)" +CT_GLIBC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GLIBC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GLIBC_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_GLIBC_SIGNATURE_FORMAT="packed/.sig" +CT_GLIBC_later_than_2_34=y +CT_GLIBC_2_34_or_later=y +CT_GLIBC_later_than_2_32=y +CT_GLIBC_2_32_or_later=y +CT_GLIBC_later_than_2_31=y +CT_GLIBC_2_31_or_later=y +CT_GLIBC_later_than_2_30=y +CT_GLIBC_2_30_or_later=y +CT_GLIBC_later_than_2_29=y +CT_GLIBC_2_29_or_later=y +CT_GLIBC_later_than_2_28=y 
+CT_GLIBC_2_28_or_later=y +CT_GLIBC_later_than_2_27=y +CT_GLIBC_2_27_or_later=y +CT_GLIBC_later_than_2_26=y +CT_GLIBC_2_26_or_later=y +CT_GLIBC_later_than_2_25=y +CT_GLIBC_2_25_or_later=y +CT_GLIBC_later_than_2_24=y +CT_GLIBC_2_24_or_later=y +CT_GLIBC_later_than_2_23=y +CT_GLIBC_2_23_or_later=y +CT_GLIBC_later_than_2_20=y +CT_GLIBC_2_20_or_later=y +CT_GLIBC_later_than_2_17=y +CT_GLIBC_2_17_or_later=y +CT_GLIBC_later_than_2_14=y +CT_GLIBC_2_14_or_later=y +CT_GLIBC_DEP_KERNEL_HEADERS_VERSION=y +CT_GLIBC_DEP_BINUTILS=y +CT_GLIBC_DEP_GCC=y +CT_GLIBC_DEP_PYTHON=y +CT_GLIBC_BUILD_SSP=y +CT_GLIBC_HAS_LIBIDN_ADDON=y +# CT_GLIBC_USE_LIBIDN_ADDON is not set +CT_GLIBC_NO_SPARC_V8=y +CT_GLIBC_EXTRA_CONFIG_ARRAY="--enable-crypt libc_cv_rtlddir=/lib64 libc_cv_slibdir=/lib64 --libdir=/usr/lib64 " +CT_GLIBC_CONFIGPARMS="" +CT_GLIBC_EXTRA_CFLAGS="" +# CT_GLIBC_ENABLE_FORTIFIED_BUILD is not set +# CT_GLIBC_DISABLE_VERSIONING is not set +CT_GLIBC_OLDEST_ABI="" +CT_GLIBC_FORCE_UNWIND=y +# CT_GLIBC_LOCALES is not set +# CT_GLIBC_KERNEL_VERSION_NONE is not set +CT_GLIBC_KERNEL_VERSION_AS_HEADERS=y +# CT_GLIBC_KERNEL_VERSION_CHOSEN is not set +CT_GLIBC_MIN_KERNEL="5.10.100" +CT_GLIBC_SSP_DEFAULT=y +# CT_GLIBC_SSP_NO is not set +# CT_GLIBC_SSP_YES is not set +# CT_GLIBC_SSP_ALL is not set +# CT_GLIBC_SSP_STRONG is not set +CT_GLIBC_ENABLE_WERROR=y +# CT_GLIBC_ENABLE_COMMON_FLAG is not set +CT_ALL_LIBC_CHOICES="AVR_LIBC BIONIC GLIBC MINGW_W64 MOXIEBOX MUSL NEWLIB NONE UCLIBC_NG" +CT_LIBC_SUPPORT_THREADS_ANY=y +CT_LIBC_SUPPORT_THREADS_NATIVE=y + +# +# Common C library options +# +CT_THREADS_NATIVE=y +# CT_CREATE_LDSO_CONF is not set +CT_LIBC_XLDD=y +# end of C-library + +# +# C compiler +# +CT_CC_CORE_NEEDED=y +CT_CC_SUPPORT_CXX=y +CT_CC_SUPPORT_FORTRAN=y +CT_CC_SUPPORT_ADA=y +CT_CC_SUPPORT_OBJC=y +CT_CC_SUPPORT_OBJCXX=y +CT_CC_SUPPORT_GOLANG=y +CT_CC_GCC=y +CT_CC="gcc" +CT_CC_CHOICE_KSYM="GCC" +CT_CC_GCC_SHOW=y + +# +# Options for gcc +# +CT_CC_GCC_PKG_KSYM="GCC" +CT_GCC_DIR_NAME="gcc" +CT_GCC_USE_GNU=y +# CT_GCC_USE_LINARO is not set +CT_GCC_USE="GCC" +CT_GCC_PKG_NAME="gcc" +# CT_GCC_SRC_RELEASE is not set +# CT_GCC_SRC_DEVEL is not set +CT_GCC_SRC_CUSTOM=y +CT_GCC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gcc/gcc-10.3.0" +CT_GCC_PATCH_GLOBAL=y +# CT_GCC_PATCH_BUNDLED is not set +# CT_GCC_PATCH_LOCAL is not set +# CT_GCC_PATCH_BUNDLED_LOCAL is not set +# CT_GCC_PATCH_LOCAL_BUNDLED is not set +# CT_GCC_PATCH_NONE is not set +CT_GCC_PATCH_ORDER="global" +# CT_GCC_VERY_NEW is not set +# CT_GCC_V_11 is not set +CT_GCC_V_10=y +# CT_GCC_V_9 is not set +# CT_GCC_V_8 is not set +# CT_GCC_V_7 is not set +# CT_GCC_V_6 is not set +CT_GCC_VERSION="10.3.0" +CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware gcc/releases/gcc-${CT_GCC_VERSION})" +CT_GCC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GCC_SIGNATURE_FORMAT="" +CT_GCC_11_or_older=y +CT_GCC_older_than_11=y +CT_GCC_later_than_10=y +CT_GCC_10_or_later=y +CT_GCC_later_than_9=y +CT_GCC_9_or_later=y +CT_GCC_later_than_8=y +CT_GCC_8_or_later=y +CT_GCC_later_than_7=y +CT_GCC_7_or_later=y +CT_GCC_later_than_6=y +CT_GCC_6_or_later=y +CT_GCC_REQUIRE_6_or_later=y +CT_GCC_later_than_5=y +CT_GCC_5_or_later=y +CT_GCC_REQUIRE_5_or_later=y +CT_GCC_later_than_4_9=y +CT_GCC_4_9_or_later=y +CT_GCC_REQUIRE_4_9_or_later=y +CT_CC_GCC_ENABLE_PLUGINS=y +CT_CC_GCC_GOLD=y +CT_CC_GCC_HAS_LIBMPX=y +CT_CC_GCC_ENABLE_CXX_FLAGS="" 
+CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" +CT_CC_GCC_EXTRA_CONFIG_ARRAY="--with-arch=armv8-a --with-gnu-as --with-gnu-ld --enable-c99 --enable-shared --enable-poison-system-directories --enable-symvers=gnu --disable-bootstrap --disable-libstdcxx-dual-abi --enable-default-pie --libdir=\"${CT_PREFIX_DIR}/lib64\" --with-build-time-tools=\"${CT_PREFIX_DIR}/${CT_TARGET}/bin\"" +CT_CC_GCC_MULTILIB_LIST="lp64" +CT_CC_GCC_STATIC_LIBSTDCXX=y +# CT_CC_GCC_SYSTEM_ZLIB is not set +CT_CC_GCC_CONFIG_TLS=m + +# +# Optimisation features +# +CT_CC_GCC_USE_GRAPHITE=y +CT_CC_GCC_USE_LTO=y +CT_CC_GCC_LTO_ZSTD=m + +# +# Settings for libraries running on target +# +CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y +# CT_CC_GCC_LIBMUDFLAP is not set +CT_CC_GCC_LIBGOMP=y +# CT_CC_GCC_LIBSSP is not set +# CT_CC_GCC_LIBQUADMATH is not set +# CT_CC_GCC_LIBSANITIZER is not set + +# +# Misc. obscure options. +# +CT_CC_CXA_ATEXIT=y +CT_CC_GCC_TM_CLONE_REGISTRY=m +# CT_CC_GCC_DISABLE_PCH is not set +CT_CC_GCC_SJLJ_EXCEPTIONS=m +CT_CC_GCC_LDBL_128=m +# CT_CC_GCC_BUILD_ID is not set +CT_CC_GCC_LNK_HASH_STYLE_DEFAULT=y +# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set +# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set +# CT_CC_GCC_LNK_HASH_STYLE_BOTH is not set +CT_CC_GCC_LNK_HASH_STYLE="" +CT_CC_GCC_DEC_FLOATS_AUTO=y +# CT_CC_GCC_DEC_FLOATS_BID is not set +# CT_CC_GCC_DEC_FLOATS_DPD is not set +# CT_CC_GCC_DEC_FLOATS_NO is not set +CT_CC_GCC_DEC_FLOATS="" +CT_ALL_CC_CHOICES="GCC" + +# +# Additional supported languages: +# +CT_CC_LANG_CXX=y +CT_CC_LANG_FORTRAN=y +# CT_CC_LANG_ADA is not set +# CT_CC_LANG_OBJC is not set +# CT_CC_LANG_OBJCXX is not set +# CT_CC_LANG_GOLANG is not set +CT_CC_LANG_OTHERS="" +# end of C compiler + +# +# Debug facilities +# +# CT_DEBUG_DUMA is not set +CT_DEBUG_GDB=y +CT_DEBUG_GDB_PKG_KSYM="GDB" +CT_GDB_DIR_NAME="gdb" +CT_GDB_PKG_NAME="gdb" +# CT_GDB_SRC_RELEASE is not set +# CT_GDB_SRC_DEVEL is not set +CT_GDB_SRC_CUSTOM=y +CT_GDB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gdb/gdb-12.1" +CT_GDB_PATCH_GLOBAL=y +# CT_GDB_PATCH_BUNDLED is not set +# CT_GDB_PATCH_LOCAL is not set +# CT_GDB_PATCH_BUNDLED_LOCAL is not set +# CT_GDB_PATCH_LOCAL_BUNDLED is not set +# CT_GDB_PATCH_NONE is not set +CT_GDB_PATCH_ORDER="global" +# CT_GDB_VERY_NEW is not set +CT_GDB_V_11=y +# CT_GDB_V_10 is not set +# CT_GDB_V_9 is not set +# CT_GDB_V_8_3 is not set +CT_GDB_VERSION="11.2" +CT_GDB_MIRRORS="$(CT_Mirrors GNU gdb) $(CT_Mirrors sourceware gdb/releases)" +CT_GDB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GDB_SIGNATURE_FORMAT="" +CT_GDB_later_than_11=y +CT_GDB_11_or_later=y +CT_GDB_later_than_10=y +CT_GDB_10_or_later=y +CT_GDB_later_than_8_3=y +CT_GDB_8_3_or_later=y +CT_GDB_later_than_8_0=y +CT_GDB_8_0_or_later=y +CT_GDB_later_than_7_12=y +CT_GDB_7_12_or_later=y +CT_GDB_later_than_7_11=y +CT_GDB_7_11_or_later=y +CT_GDB_CROSS=y +# CT_GDB_CROSS_STATIC is not set +# CT_GDB_CROSS_SIM is not set +CT_GDB_CROSS_PYTHON=y +CT_GDB_CROSS_PYTHON_BINARY="" +CT_GDB_CROSS_EXTRA_CONFIG_ARRAY="" +# CT_GDB_NATIVE is not set +CT_GDB_GDBSERVER=y +# CT_GDB_NATIVE_BUILD_IPA_LIB is not set +# CT_GDB_NATIVE_STATIC is not set +# CT_GDB_NATIVE_STATIC_LIBSTDCXX is not set +CT_GDB_GDBSERVER_TOPLEVEL=y +# CT_DEBUG_LTRACE is not set +# CT_DEBUG_STRACE is not set +CT_ALL_DEBUG_CHOICES="DUMA GDB LTRACE STRACE" +# end of Debug facilities + +# +# Companion libraries +# +# CT_COMPLIBS_CHECK is not set +# CT_COMP_LIBS_CLOOG is not set +CT_COMP_LIBS_EXPAT=y 
+CT_COMP_LIBS_EXPAT_PKG_KSYM="EXPAT" +CT_EXPAT_DIR_NAME="expat" +CT_EXPAT_PKG_NAME="expat" +# CT_EXPAT_SRC_RELEASE is not set +# CT_EXPAT_SRC_DEVEL is not set +CT_EXPAT_SRC_CUSTOM=y +CT_EXPAT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/expat/expat-2.5.0" +CT_EXPAT_PATCH_GLOBAL=y +# CT_EXPAT_PATCH_BUNDLED is not set +# CT_EXPAT_PATCH_LOCAL is not set +# CT_EXPAT_PATCH_BUNDLED_LOCAL is not set +# CT_EXPAT_PATCH_LOCAL_BUNDLED is not set +# CT_EXPAT_PATCH_NONE is not set +CT_EXPAT_PATCH_ORDER="global" +CT_EXPAT_VERY_NEW=y +# CT_EXPAT_V_2_4 is not set +CT_EXPAT_VERSION="new" +CT_EXPAT_MIRRORS="http://downloads.sourceforge.net/project/expat/expat/${CT_EXPAT_VERSION} https://github.com/libexpat/libexpat/releases/download/R_${CT_EXPAT_VERSION//./_}" +CT_EXPAT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_EXPAT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_EXPAT_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2 .tar.gz" +CT_EXPAT_SIGNATURE_FORMAT="" +CT_COMP_LIBS_GETTEXT=y +CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT" +CT_GETTEXT_DIR_NAME="gettext" +CT_GETTEXT_PKG_NAME="gettext" +# CT_GETTEXT_SRC_RELEASE is not set +# CT_GETTEXT_SRC_DEVEL is not set +CT_GETTEXT_SRC_CUSTOM=y +CT_GETTEXT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gettext/gettext-0.21" +CT_GETTEXT_PATCH_GLOBAL=y +# CT_GETTEXT_PATCH_BUNDLED is not set +# CT_GETTEXT_PATCH_LOCAL is not set +# CT_GETTEXT_PATCH_BUNDLED_LOCAL is not set +# CT_GETTEXT_PATCH_LOCAL_BUNDLED is not set +# CT_GETTEXT_PATCH_NONE is not set +CT_GETTEXT_PATCH_ORDER="global" +# CT_GETTEXT_VERY_NEW is not set +CT_GETTEXT_V_0_21=y +# CT_GETTEXT_V_0_20_1 is not set +# CT_GETTEXT_V_0_19_8_1 is not set +CT_GETTEXT_VERSION="0.21" +CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)" +CT_GETTEXT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig" +CT_GETTEXT_0_21_or_later=y +CT_GETTEXT_0_21_or_older=y +CT_GETTEXT_INCOMPATIBLE_WITH_UCLIBC_NG=y + +# +# This version of gettext is not compatible with uClibc-NG. Select +# + +# +# a different version if uClibc-NG is used on the target or (in a +# + +# +# Canadian cross build) on the host. 
+# +CT_COMP_LIBS_GMP=y +CT_COMP_LIBS_GMP_PKG_KSYM="GMP" +CT_GMP_DIR_NAME="gmp" +CT_GMP_PKG_NAME="gmp" +# CT_GMP_SRC_RELEASE is not set +# CT_GMP_SRC_DEVEL is not set +CT_GMP_SRC_CUSTOM=y +CT_GMP_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gmp/gmp-6.2.1" +CT_GMP_PATCH_GLOBAL=y +# CT_GMP_PATCH_BUNDLED is not set +# CT_GMP_PATCH_LOCAL is not set +# CT_GMP_PATCH_BUNDLED_LOCAL is not set +# CT_GMP_PATCH_LOCAL_BUNDLED is not set +# CT_GMP_PATCH_NONE is not set +CT_GMP_PATCH_ORDER="global" +# CT_GMP_VERY_NEW is not set +CT_GMP_V_6_2=y +# CT_GMP_V_6_1 is not set +CT_GMP_VERSION="6.2.1" +CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)" +CT_GMP_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2" +CT_GMP_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_ISL=y +CT_COMP_LIBS_ISL_PKG_KSYM="ISL" +CT_ISL_DIR_NAME="isl" +CT_ISL_PKG_NAME="isl" +# CT_ISL_SRC_RELEASE is not set +# CT_ISL_SRC_DEVEL is not set +CT_ISL_SRC_CUSTOM=y +CT_ISL_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/isl/isl-0.24" +CT_ISL_PATCH_GLOBAL=y +# CT_ISL_PATCH_BUNDLED is not set +# CT_ISL_PATCH_LOCAL is not set +# CT_ISL_PATCH_BUNDLED_LOCAL is not set +# CT_ISL_PATCH_LOCAL_BUNDLED is not set +# CT_ISL_PATCH_NONE is not set +CT_ISL_PATCH_ORDER="global" +# CT_ISL_VERY_NEW is not set +# CT_ISL_V_0_24 is not set +# CT_ISL_V_0_23 is not set +# CT_ISL_V_0_22 is not set +# CT_ISL_V_0_21 is not set +# CT_ISL_V_0_20 is not set +# CT_ISL_V_0_19 is not set +# CT_ISL_V_0_18 is not set +# CT_ISL_V_0_17 is not set +CT_ISL_V_0_16=y +# CT_ISL_V_0_15 is not set +CT_ISL_VERSION="0.16.1" +CT_ISL_MIRRORS="https://libisl.sourceforge.io" +CT_ISL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_ISL_SIGNATURE_FORMAT="" +CT_ISL_0_18_or_older=y +CT_ISL_older_than_0_18=y +CT_ISL_later_than_0_15=y +CT_ISL_0_15_or_later=y +# CT_COMP_LIBS_LIBELF is not set +CT_COMP_LIBS_LIBICONV=y +CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV" +CT_LIBICONV_DIR_NAME="libiconv" +CT_LIBICONV_PKG_NAME="libiconv" +# CT_LIBICONV_SRC_RELEASE is not set +# CT_LIBICONV_SRC_DEVEL is not set +CT_LIBICONV_SRC_CUSTOM=y +CT_LIBICONV_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libiconv/libiconv-1.16" +CT_LIBICONV_PATCH_GLOBAL=y +# CT_LIBICONV_PATCH_BUNDLED is not set +# CT_LIBICONV_PATCH_LOCAL is not set +# CT_LIBICONV_PATCH_BUNDLED_LOCAL is not set +# CT_LIBICONV_PATCH_LOCAL_BUNDLED is not set +# CT_LIBICONV_PATCH_NONE is not set +CT_LIBICONV_PATCH_ORDER="global" +# CT_LIBICONV_VERY_NEW is not set +CT_LIBICONV_V_1_16=y +# CT_LIBICONV_V_1_15 is not set +CT_LIBICONV_VERSION="1.16" +CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)" +CT_LIBICONV_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz" +CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_MPC=y +CT_COMP_LIBS_MPC_PKG_KSYM="MPC" +CT_MPC_DIR_NAME="mpc" +CT_MPC_PKG_NAME="mpc" +# CT_MPC_SRC_RELEASE is not set +# CT_MPC_SRC_DEVEL is not set +CT_MPC_SRC_CUSTOM=y +CT_MPC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libmpc/mpc-1.3.1" +CT_MPC_PATCH_GLOBAL=y +# CT_MPC_PATCH_BUNDLED is not set +# CT_MPC_PATCH_LOCAL is not set +# CT_MPC_PATCH_BUNDLED_LOCAL is not set +# CT_MPC_PATCH_LOCAL_BUNDLED is not set +# CT_MPC_PATCH_NONE is not set 
+CT_MPC_PATCH_ORDER="global" +# CT_MPC_VERY_NEW is not set +CT_MPC_V_1_2=y +# CT_MPC_V_1_1 is not set +# CT_MPC_V_1_0 is not set +CT_MPC_VERSION="1.2.1" +CT_MPC_MIRRORS="http://www.multiprecision.org/downloads $(CT_Mirrors GNU mpc)" +CT_MPC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_FORMATS=".tar.gz" +CT_MPC_SIGNATURE_FORMAT="packed/.sig" +CT_MPC_later_than_1_1_0=y +CT_MPC_1_1_0_or_later=y +CT_COMP_LIBS_MPFR=y +CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR" +CT_MPFR_DIR_NAME="mpfr" +CT_MPFR_PKG_NAME="mpfr" +# CT_MPFR_SRC_RELEASE is not set +# CT_MPFR_SRC_DEVEL is not set +CT_MPFR_SRC_CUSTOM=y +CT_MPFR_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/mpfr/mpfr-4.1.0" +CT_MPFR_PATCH_GLOBAL=y +# CT_MPFR_PATCH_BUNDLED is not set +# CT_MPFR_PATCH_LOCAL is not set +# CT_MPFR_PATCH_BUNDLED_LOCAL is not set +# CT_MPFR_PATCH_LOCAL_BUNDLED is not set +# CT_MPFR_PATCH_NONE is not set +CT_MPFR_PATCH_ORDER="global" +# CT_MPFR_VERY_NEW is not set +CT_MPFR_V_4_1=y +# CT_MPFR_V_4_0 is not set +# CT_MPFR_V_3_1 is not set +CT_MPFR_VERSION="4.1.0" +CT_MPFR_MIRRORS="http://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)" +CT_MPFR_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip" +CT_MPFR_SIGNATURE_FORMAT="packed/.asc" +CT_MPFR_later_than_4_0_0=y +CT_MPFR_4_0_0_or_later=y +CT_COMP_LIBS_NCURSES=y +CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES" +CT_NCURSES_DIR_NAME="ncurses" +CT_NCURSES_PKG_NAME="ncurses" +# CT_NCURSES_SRC_RELEASE is not set +# CT_NCURSES_SRC_DEVEL is not set +CT_NCURSES_SRC_CUSTOM=y +CT_NCURSES_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/ncurses/ncurses-6.4" +CT_NCURSES_PATCH_GLOBAL=y +# CT_NCURSES_PATCH_BUNDLED is not set +# CT_NCURSES_PATCH_LOCAL is not set +# CT_NCURSES_PATCH_BUNDLED_LOCAL is not set +# CT_NCURSES_PATCH_LOCAL_BUNDLED is not set +# CT_NCURSES_PATCH_NONE is not set +CT_NCURSES_PATCH_ORDER="global" +CT_NCURSES_VERY_NEW=y +# CT_NCURSES_V_6_2 is not set +# CT_NCURSES_V_6_1 is not set +# CT_NCURSES_V_6_0 is not set +CT_NCURSES_VERSION="new" +CT_NCURSES_MIRRORS="https://invisible-mirror.net/archives/ncurses $(CT_Mirrors GNU ncurses)" +CT_NCURSES_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_FORMATS=".tar.gz" +CT_NCURSES_SIGNATURE_FORMAT="packed/.sig" +CT_NCURSES_NEW_ABI=y +CT_NCURSES_HOST_CONFIG_ARGS="" +CT_NCURSES_HOST_DISABLE_DB=y +CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100" +CT_NCURSES_TARGET_CONFIG_ARGS="" +# CT_NCURSES_TARGET_DISABLE_DB is not set +CT_NCURSES_TARGET_FALLBACKS="" +CT_COMP_LIBS_ZLIB=y +CT_COMP_LIBS_ZLIB_PKG_KSYM="ZLIB" +CT_ZLIB_DIR_NAME="zlib" +CT_ZLIB_PKG_NAME="zlib" +# CT_ZLIB_SRC_RELEASE is not set +# CT_ZLIB_SRC_DEVEL is not set +CT_ZLIB_SRC_CUSTOM=y +CT_ZLIB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/zlib/zlib-1.2.13" +CT_ZLIB_PATCH_GLOBAL=y +# CT_ZLIB_PATCH_BUNDLED is not set +# CT_ZLIB_PATCH_LOCAL is not set +# CT_ZLIB_PATCH_BUNDLED_LOCAL is not set +# CT_ZLIB_PATCH_LOCAL_BUNDLED is not set +# CT_ZLIB_PATCH_NONE is not set +CT_ZLIB_PATCH_ORDER="global" +# CT_ZLIB_VERY_NEW is not set +CT_ZLIB_V_1_2_12=y +CT_ZLIB_VERSION="1.2.12" +CT_ZLIB_MIRRORS="http://downloads.sourceforge.net/project/libpng/zlib/${CT_ZLIB_VERSION} https://www.zlib.net/" +CT_ZLIB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" 
+CT_ZLIB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_ZLIB_SIGNATURE_FORMAT="packed/.asc" +CT_ALL_COMP_LIBS_CHOICES="CLOOG EXPAT GETTEXT GMP GNUPRUMCU ISL LIBELF LIBICONV MPC MPFR NCURSES NEWLIB_NANO PICOLIBC ZLIB" +CT_LIBICONV_NEEDED=y +CT_GETTEXT_NEEDED=y +CT_GMP_NEEDED=y +CT_MPFR_NEEDED=y +CT_ISL_NEEDED=y +CT_MPC_NEEDED=y +CT_EXPAT_NEEDED=y +CT_NCURSES_NEEDED=y +CT_ZLIB_NEEDED=y +CT_LIBICONV=y +CT_GETTEXT=y +CT_GMP=y +CT_MPFR=y +CT_ISL=y +CT_MPC=y +CT_EXPAT=y +CT_NCURSES=y +CT_ZLIB=y +# end of Companion libraries + +# +# Companion tools +# +# CT_COMP_TOOLS_FOR_HOST is not set +# CT_COMP_TOOLS_AUTOCONF is not set +# CT_COMP_TOOLS_AUTOMAKE is not set +# CT_COMP_TOOLS_BISON is not set +# CT_COMP_TOOLS_DTC is not set +# CT_COMP_TOOLS_LIBTOOL is not set +# CT_COMP_TOOLS_M4 is not set +# CT_COMP_TOOLS_MAKE is not set +CT_ALL_COMP_TOOLS_CHOICES="AUTOCONF AUTOMAKE BISON DTC LIBTOOL M4 MAKE" +# end of Companion tools + +# +# Test suite +# +# CT_TEST_SUITE_GCC is not set +# end of Test suite diff --git a/cross_tools/configs/config_aarch64-musl b/cross_tools/configs/config_aarch64-musl new file mode 100644 index 0000000000000000000000000000000000000000..cb1a42c6cab0ad85b794b287cc22e7fa6aa11d30 --- /dev/null +++ b/cross_tools/configs/config_aarch64-musl @@ -0,0 +1,972 @@ +# +# Automatically generated file; DO NOT EDIT. +# crosstool-NG 1.25.0 Configuration +# +CT_CONFIGURE_has_static_link=y +CT_CONFIGURE_has_cxx11=y +CT_CONFIGURE_has_curl=y +CT_CONFIGURE_has_rsync=y +CT_CONFIGURE_has_make_3_81_or_newer=y +CT_CONFIGURE_has_make_4_0_or_newer=y +CT_CONFIGURE_has_libtool_2_4_or_newer=y +CT_CONFIGURE_has_libtoolize_2_4_or_newer=y +CT_CONFIGURE_has_autoconf_2_65_or_newer=y +CT_CONFIGURE_has_autoreconf_2_65_or_newer=y +CT_CONFIGURE_has_automake_1_15_or_newer=y +CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y +CT_CONFIGURE_has_python_3_4_or_newer=y +CT_CONFIGURE_has_bison_2_7_or_newer=y +CT_CONFIGURE_has_python=y +CT_CONFIGURE_has_git=y +CT_CONFIGURE_has_md5sum=y +CT_CONFIGURE_has_sha1sum=y +CT_CONFIGURE_has_sha256sum=y +CT_CONFIGURE_has_sha512sum=y +CT_CONFIGURE_has_install_with_strip_program=y +CT_VERSION="1.25.0" +CT_VCHECK="" +CT_CONFIG_VERSION_ENV="4" +CT_CONFIG_VERSION_CURRENT="4" +CT_CONFIG_VERSION="4" +CT_MODULES=y + +# +# Paths and misc options +# + +# +# crosstool-NG behavior +# +# CT_OBSOLETE is not set +CT_EXPERIMENTAL=y +# CT_ALLOW_BUILD_AS_ROOT is not set +# CT_DEBUG_CT is not set + +# +# Paths +# +CT_LOCAL_TARBALLS_DIR="${HOME}/src" +CT_SAVE_TARBALLS=y +# CT_TARBALLS_BUILDROOT_LAYOUT is not set +CT_WORK_DIR="${CT_TOP_DIR}/.build" +CT_BUILD_TOP_DIR="${CT_WORK_DIR:-${CT_TOP_DIR}/.build}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_BUILD_DIR="${CT_BUILD_TOP_DIR}/build" +CT_PREFIX_DIR="${CT_PREFIX:-${HOME}/x-tools}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_RM_RF_PREFIX_DIR=y +CT_REMOVE_DOCS=y +CT_INSTALL_LICENSES=y +CT_PREFIX_DIR_RO=y +CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y +# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set + +# +# Downloading +# +CT_DOWNLOAD_AGENT_CURL=y +# CT_DOWNLOAD_AGENT_NONE is not set +# CT_FORBID_DOWNLOAD is not set +# CT_FORCE_DOWNLOAD is not set +CT_CONNECT_TIMEOUT=10 +CT_DOWNLOAD_CURL_OPTIONS="--location --ftp-pasv --retry 3 --fail --silent" +# CT_ONLY_DOWNLOAD is not set +# CT_USE_MIRROR is not set +CT_VERIFY_DOWNLOAD_DIGEST=y +CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y +# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set +CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512" +# CT_VERIFY_DOWNLOAD_SIGNATURE is not 
set + +# +# Extracting +# +# CT_FORCE_EXTRACT is not set +CT_OVERRIDE_CONFIG_GUESS_SUB=y +# CT_ONLY_EXTRACT is not set +CT_PATCH_BUNDLED=y +# CT_PATCH_LOCAL is not set +# CT_PATCH_BUNDLED_LOCAL is not set +# CT_PATCH_LOCAL_BUNDLED is not set +# CT_PATCH_NONE is not set +CT_PATCH_ORDER="bundled" + +# +# Build behavior +# +CT_PARALLEL_JOBS=0 +CT_LOAD="" +CT_USE_PIPES=y +CT_EXTRA_CFLAGS_FOR_BUILD="" +CT_EXTRA_CXXFLAGS_FOR_BUILD="" +CT_EXTRA_LDFLAGS_FOR_BUILD="" +CT_EXTRA_CFLAGS_FOR_HOST="" +CT_EXTRA_LDFLAGS_FOR_HOST="" +# CT_CONFIG_SHELL_SH is not set +# CT_CONFIG_SHELL_ASH is not set +CT_CONFIG_SHELL_BASH=y +# CT_CONFIG_SHELL_CUSTOM is not set +CT_CONFIG_SHELL="${bash}" + +# +# Logging +# +# CT_LOG_ERROR is not set +# CT_LOG_WARN is not set +# CT_LOG_INFO is not set +CT_LOG_EXTRA=y +# CT_LOG_ALL is not set +# CT_LOG_DEBUG is not set +CT_LOG_LEVEL_MAX="EXTRA" +# CT_LOG_SEE_TOOLS_WARN is not set +CT_LOG_PROGRESS_BAR=y +CT_LOG_TO_FILE=y +CT_LOG_FILE_COMPRESS=y +# end of Paths and misc options + +# +# Target options +# +# CT_ARCH_ALPHA is not set +# CT_ARCH_ARC is not set +CT_ARCH_ARM=y +# CT_ARCH_AVR is not set +# CT_ARCH_C6X is not set +# CT_ARCH_M68K is not set +# CT_ARCH_MICROBLAZE is not set +# CT_ARCH_MIPS is not set +# CT_ARCH_MOXIE is not set +# CT_ARCH_MSP430 is not set +# CT_ARCH_NIOS2 is not set +# CT_ARCH_POWERPC is not set +# CT_ARCH_PRU is not set +# CT_ARCH_RISCV is not set +# CT_ARCH_S390 is not set +# CT_ARCH_SH is not set +# CT_ARCH_SPARC is not set +# CT_ARCH_X86 is not set +# CT_ARCH_XTENSA is not set +CT_ARCH="arm" +CT_ARCH_CHOICE_KSYM="ARM" +CT_ARCH_CPU="" +CT_ARCH_TUNE="" +CT_ARCH_ARM_SHOW=y + +# +# Options for arm +# +CT_ARCH_ARM_PKG_KSYM="" +CT_ALL_ARCH_CHOICES="ALPHA ARC ARM AVR C6X M68K MICROBLAZE MIPS MOXIE MSP430 NIOS2 POWERPC PRU RISCV S390 SH SPARC X86 XTENSA" +CT_ARCH_SUFFIX="" +# CT_OMIT_TARGET_VENDOR is not set + +# +# Generic target options +# +# CT_MULTILIB is not set +CT_DEMULTILIB=y +CT_ARCH_SUPPORTS_BOTH_MMU=y +CT_ARCH_DEFAULT_HAS_MMU=y +CT_ARCH_USE_MMU=y +CT_ARCH_SUPPORTS_FLAT_FORMAT=y +CT_ARCH_SUPPORTS_EITHER_ENDIAN=y +CT_ARCH_DEFAULT_LE=y +# CT_ARCH_BE is not set +CT_ARCH_LE=y +CT_ARCH_ENDIAN="little" +CT_ARCH_SUPPORTS_32=y +CT_ARCH_SUPPORTS_64=y +CT_ARCH_DEFAULT_32=y +CT_ARCH_BITNESS=64 +# CT_ARCH_32 is not set +CT_ARCH_64=y + +# +# Target optimisations +# +CT_ARCH_SUPPORTS_WITH_ARCH=y +CT_ARCH_SUPPORTS_WITH_CPU=y +CT_ARCH_SUPPORTS_WITH_TUNE=y +CT_ARCH_EXCLUSIVE_WITH_CPU=y +CT_ARCH_ARCH="" +CT_TARGET_CFLAGS="" +CT_TARGET_LDFLAGS="" +# end of Target options + +# +# Toolchain options +# + +# +# General toolchain options +# +CT_FORCE_SYSROOT=y +CT_USE_SYSROOT=y +CT_SYSROOT_NAME="sysroot" +CT_SYSROOT_DIR_PREFIX="" +CT_WANTS_STATIC_LINK=y +CT_WANTS_STATIC_LINK_CXX=y +# CT_STATIC_TOOLCHAIN is not set +CT_SHOW_CT_VERSION=y +CT_TOOLCHAIN_PKGVERSION="" +CT_TOOLCHAIN_BUGURL="" + +# +# Tuple completion and aliasing +# +CT_TARGET_VENDOR="openeuler" +CT_TARGET_ALIAS_SED_EXPR="" +CT_TARGET_ALIAS="" + +# +# Toolchain type +# +# CT_NATIVE is not set +CT_CROSS=y +# CT_CROSS_NATIVE is not set +# CT_CANADIAN is not set +CT_TOOLCHAIN_TYPE="cross" + +# +# Build system +# +CT_BUILD="" +CT_BUILD_PREFIX="" +CT_BUILD_SUFFIX="" + +# +# Misc options +# +# CT_TOOLCHAIN_ENABLE_NLS is not set +# end of Toolchain options + +# +# Operating System +# +CT_KERNEL_SUPPORTS_SHARED_LIBS=y +# CT_KERNEL_BARE_METAL is not set +CT_KERNEL_LINUX=y +CT_KERNEL="linux" +CT_KERNEL_CHOICE_KSYM="LINUX" +CT_KERNEL_LINUX_SHOW=y + +# +# Options for linux +# +CT_KERNEL_LINUX_PKG_KSYM="LINUX" 
+CT_LINUX_DIR_NAME="linux" +CT_LINUX_USE_WWW_KERNEL_ORG=y +# CT_LINUX_USE_ORACLE is not set +CT_LINUX_USE="LINUX" +CT_LINUX_PKG_NAME="linux" +# CT_LINUX_SRC_RELEASE is not set +# CT_LINUX_SRC_DEVEL is not set +CT_LINUX_SRC_CUSTOM=y +CT_LINUX_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/kernel" +CT_LINUX_PATCH_GLOBAL=y +# CT_LINUX_PATCH_BUNDLED is not set +# CT_LINUX_PATCH_LOCAL is not set +# CT_LINUX_PATCH_BUNDLED_LOCAL is not set +# CT_LINUX_PATCH_LOCAL_BUNDLED is not set +# CT_LINUX_PATCH_NONE is not set +CT_LINUX_PATCH_ORDER="global" +# CT_LINUX_VERY_NEW is not set +# CT_LINUX_V_5_16 is not set +# CT_LINUX_V_5_15 is not set +# CT_LINUX_V_5_14 is not set +# CT_LINUX_V_5_13 is not set +# CT_LINUX_V_5_12 is not set +# CT_LINUX_V_5_11 is not set +CT_LINUX_V_5_10=y +# CT_LINUX_V_5_9 is not set +# CT_LINUX_V_5_8 is not set +# CT_LINUX_V_5_7 is not set +# CT_LINUX_V_5_4 is not set +# CT_LINUX_V_5_3 is not set +# CT_LINUX_V_5_2 is not set +# CT_LINUX_V_5_1 is not set +# CT_LINUX_V_5_0 is not set +# CT_LINUX_V_4_20 is not set +# CT_LINUX_V_4_19 is not set +# CT_LINUX_V_4_18 is not set +# CT_LINUX_V_4_17 is not set +# CT_LINUX_V_4_16 is not set +# CT_LINUX_V_4_15 is not set +# CT_LINUX_V_4_14 is not set +# CT_LINUX_V_4_13 is not set +# CT_LINUX_V_4_12 is not set +# CT_LINUX_V_4_11 is not set +# CT_LINUX_V_4_10 is not set +# CT_LINUX_V_4_9 is not set +# CT_LINUX_V_4_4 is not set +# CT_LINUX_V_4_1 is not set +# CT_LINUX_V_3_16 is not set +# CT_LINUX_V_3_13 is not set +# CT_LINUX_V_3_12 is not set +# CT_LINUX_V_3_10 is not set +CT_LINUX_VERSION="5.10.100" +CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})" +CT_LINUX_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign" +CT_LINUX_5_12_or_older=y +CT_LINUX_older_than_5_12=y +CT_LINUX_later_than_5_3=y +CT_LINUX_5_3_or_later=y +CT_LINUX_later_than_4_8=y +CT_LINUX_4_8_or_later=y +CT_LINUX_later_than_3_7=y +CT_LINUX_3_7_or_later=y +CT_LINUX_REQUIRE_3_7_or_later=y +CT_LINUX_later_than_3_2=y +CT_LINUX_3_2_or_later=y +CT_LINUX_REQUIRE_3_2_or_later=y +CT_KERNEL_DEP_RSYNC=y +CT_KERNEL_LINUX_VERBOSITY_0=y +# CT_KERNEL_LINUX_VERBOSITY_1 is not set +# CT_KERNEL_LINUX_VERBOSITY_2 is not set +CT_KERNEL_LINUX_VERBOSE_LEVEL=0 +CT_KERNEL_LINUX_INSTALL_CHECK=y +CT_ALL_KERNEL_CHOICES="BARE_METAL LINUX WINDOWS" + +# +# Common kernel options +# +CT_SHARED_LIBS=y +# end of Operating System + +# +# Binary utilities +# +CT_ARCH_BINFMT_ELF=y +CT_BINUTILS_BINUTILS=y +CT_BINUTILS="binutils" +CT_BINUTILS_CHOICE_KSYM="BINUTILS" +CT_BINUTILS_BINUTILS_SHOW=y + +# +# Options for binutils +# +CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS" +CT_BINUTILS_DIR_NAME="binutils" +CT_BINUTILS_USE_GNU=y +# CT_BINUTILS_USE_LINARO is not set +# CT_BINUTILS_USE_ORACLE is not set +CT_BINUTILS_USE="BINUTILS" +CT_BINUTILS_PKG_NAME="binutils" +# CT_BINUTILS_SRC_RELEASE is not set +# CT_BINUTILS_SRC_DEVEL is not set +CT_BINUTILS_SRC_CUSTOM=y +CT_BINUTILS_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/binutils/binutils-2.37" +CT_BINUTILS_PATCH_GLOBAL=y +# CT_BINUTILS_PATCH_BUNDLED is not set +# CT_BINUTILS_PATCH_LOCAL is not set +# CT_BINUTILS_PATCH_BUNDLED_LOCAL is not set +# CT_BINUTILS_PATCH_LOCAL_BUNDLED is not set +# CT_BINUTILS_PATCH_NONE is not set +CT_BINUTILS_PATCH_ORDER="global" +# CT_BINUTILS_VERY_NEW is not set +# CT_BINUTILS_V_2_38 is not set +CT_BINUTILS_V_2_37=y +# CT_BINUTILS_V_2_36 is not set +# 
CT_BINUTILS_V_2_35 is not set +# CT_BINUTILS_V_2_34 is not set +# CT_BINUTILS_V_2_33 is not set +# CT_BINUTILS_V_2_32 is not set +# CT_BINUTILS_V_2_31 is not set +# CT_BINUTILS_V_2_30 is not set +# CT_BINUTILS_V_2_29 is not set +# CT_BINUTILS_V_2_28 is not set +# CT_BINUTILS_V_2_27 is not set +# CT_BINUTILS_V_2_26 is not set +CT_BINUTILS_VERSION="2.37" +CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)" +CT_BINUTILS_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig" +CT_BINUTILS_later_than_2_30=y +CT_BINUTILS_2_30_or_later=y +CT_BINUTILS_later_than_2_27=y +CT_BINUTILS_2_27_or_later=y +CT_BINUTILS_later_than_2_26=y +CT_BINUTILS_2_26_or_later=y + +# +# GNU binutils +# +CT_BINUTILS_GOLD_SUPPORTS_ARCH=y +CT_BINUTILS_GOLD_SUPPORT=y +CT_BINUTILS_FORCE_LD_BFD_DEFAULT=y +# CT_BINUTILS_LINKER_LD is not set +CT_BINUTILS_LINKER_LD_GOLD=y +CT_BINUTILS_GOLD_INSTALLED=y +CT_BINUTILS_GOLD_THREADS=y +CT_BINUTILS_LINKER_BOTH=y +CT_BINUTILS_LINKERS_LIST="ld,gold" +CT_BINUTILS_LD_WRAPPER=y +CT_BINUTILS_LINKER_DEFAULT="bfd" +CT_BINUTILS_PLUGINS=y +CT_BINUTILS_RELRO=y +CT_BINUTILS_DETERMINISTIC_ARCHIVES=y +CT_BINUTILS_EXTRA_CONFIG_ARRAY="" +# CT_BINUTILS_FOR_TARGET is not set +CT_ALL_BINUTILS_CHOICES="BINUTILS" +# end of Binary utilities + +# +# C-library +# +# CT_LIBC_GLIBC is not set +CT_LIBC_MUSL=y +# CT_LIBC_UCLIBC_NG is not set +CT_LIBC="musl" +CT_LIBC_CHOICE_KSYM="MUSL" +CT_THREADS="musl" +CT_LIBC_MUSL_SHOW=y + +# +# Options for musl +# +CT_LIBC_MUSL_PKG_KSYM="MUSL" +CT_MUSL_DIR_NAME="musl" +CT_MUSL_PKG_NAME="musl" +# CT_MUSL_SRC_RELEASE is not set +# CT_MUSL_SRC_DEVEL is not set +CT_MUSL_SRC_CUSTOM=y +CT_MUSL_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/musl/musl-1.2.3" +CT_MUSL_PATCH_GLOBAL=y +# CT_MUSL_PATCH_BUNDLED is not set +# CT_MUSL_PATCH_LOCAL is not set +# CT_MUSL_PATCH_BUNDLED_LOCAL is not set +# CT_MUSL_PATCH_LOCAL_BUNDLED is not set +# CT_MUSL_PATCH_NONE is not set +CT_MUSL_PATCH_ORDER="global" +# CT_MUSL_VERY_NEW is not set +CT_MUSL_V_1_2_3=y +# CT_MUSL_V_1_2_2 is not set +# CT_MUSL_V_1_2_1 is not set +# CT_MUSL_V_1_1_24 is not set +# CT_MUSL_V_1_1_23 is not set +# CT_MUSL_V_1_1_22 is not set +# CT_MUSL_V_1_1_21 is not set +# CT_MUSL_V_1_1_20 is not set +# CT_MUSL_V_1_1_19 is not set +# CT_MUSL_V_1_1_18 is not set +# CT_MUSL_V_1_1_17 is not set +# CT_MUSL_V_1_1_16 is not set +CT_MUSL_VERSION="1.2.3" +CT_MUSL_MIRRORS="http://www.musl-libc.org/releases" +CT_MUSL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MUSL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MUSL_ARCHIVE_FORMATS=".tar.gz" +CT_MUSL_SIGNATURE_FORMAT="packed/.asc" +# CT_LIBC_MUSL_DEBUG is not set +# CT_LIBC_MUSL_WARNINGS is not set +# CT_LIBC_MUSL_OPTIMIZE_NONE is not set +CT_LIBC_MUSL_OPTIMIZE_AUTO=y +# CT_LIBC_MUSL_OPTIMIZE_SPEED is not set +# CT_LIBC_MUSL_OPTIMIZE_SIZE is not set +CT_LIBC_MUSL_OPTIMIZE="auto" +CT_ALL_LIBC_CHOICES="AVR_LIBC BIONIC GLIBC MINGW_W64 MOXIEBOX MUSL NEWLIB NONE UCLIBC_NG" +CT_LIBC_SUPPORT_THREADS_ANY=y +CT_LIBC_SUPPORT_THREADS_NATIVE=y + +# +# Common C library options +# +CT_THREADS_NATIVE=y +# CT_CREATE_LDSO_CONF is not set +CT_LIBC_XLDD=y +# end of C-library + +# +# C compiler +# +CT_CC_CORE_NEEDED=y +CT_CC_SUPPORT_CXX=y +CT_CC_SUPPORT_FORTRAN=y +CT_CC_SUPPORT_ADA=y +CT_CC_SUPPORT_OBJC=y +CT_CC_SUPPORT_OBJCXX=y +CT_CC_SUPPORT_GOLANG=y +CT_CC_GCC=y +CT_CC="gcc" +CT_CC_CHOICE_KSYM="GCC" 
+CT_CC_GCC_SHOW=y + +# +# Options for gcc +# +CT_CC_GCC_PKG_KSYM="GCC" +CT_GCC_DIR_NAME="gcc" +CT_GCC_USE_GNU=y +# CT_GCC_USE_LINARO is not set +CT_GCC_USE="GCC" +CT_GCC_PKG_NAME="gcc" +# CT_GCC_SRC_RELEASE is not set +# CT_GCC_SRC_DEVEL is not set +CT_GCC_SRC_CUSTOM=y +CT_GCC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gcc/gcc-10.3.0" +CT_GCC_PATCH_GLOBAL=y +# CT_GCC_PATCH_BUNDLED is not set +# CT_GCC_PATCH_LOCAL is not set +# CT_GCC_PATCH_BUNDLED_LOCAL is not set +# CT_GCC_PATCH_LOCAL_BUNDLED is not set +# CT_GCC_PATCH_NONE is not set +CT_GCC_PATCH_ORDER="global" +# CT_GCC_VERY_NEW is not set +# CT_GCC_V_11 is not set +CT_GCC_V_10=y +# CT_GCC_V_9 is not set +# CT_GCC_V_8 is not set +# CT_GCC_V_7 is not set +# CT_GCC_V_6 is not set +CT_GCC_VERSION="10.3.0" +CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware gcc/releases/gcc-${CT_GCC_VERSION})" +CT_GCC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GCC_SIGNATURE_FORMAT="" +CT_GCC_11_or_older=y +CT_GCC_older_than_11=y +CT_GCC_later_than_10=y +CT_GCC_10_or_later=y +CT_GCC_later_than_9=y +CT_GCC_9_or_later=y +CT_GCC_later_than_8=y +CT_GCC_8_or_later=y +CT_GCC_later_than_7=y +CT_GCC_7_or_later=y +CT_GCC_later_than_6=y +CT_GCC_6_or_later=y +CT_GCC_REQUIRE_6_or_later=y +CT_GCC_later_than_5=y +CT_GCC_5_or_later=y +CT_GCC_REQUIRE_5_or_later=y +CT_GCC_later_than_4_9=y +CT_GCC_4_9_or_later=y +CT_GCC_REQUIRE_4_9_or_later=y +CT_CC_GCC_ENABLE_PLUGINS=y +CT_CC_GCC_GOLD=y +CT_CC_GCC_HAS_LIBMPX=y +CT_CC_GCC_ENABLE_CXX_FLAGS="" +CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" +CT_CC_GCC_EXTRA_CONFIG_ARRAY="--with-arch=armv8-a --with-gnu-as --with-gnu-ld --enable-c99 --enable-shared --enable-poison-system-directories --enable-symvers=gnu --disable-bootstrap --disable-libstdcxx-dual-abi --enable-default-pie --libdir=\"${CT_PREFIX_DIR}/lib64\" --with-build-time-tools=\"${CT_PREFIX_DIR}/${CT_TARGET}/bin\"" +CT_CC_GCC_MULTILIB_LIST="lp64" +CT_CC_GCC_STATIC_LIBSTDCXX=y +# CT_CC_GCC_SYSTEM_ZLIB is not set +CT_CC_GCC_CONFIG_TLS=m + +# +# Optimisation features +# +CT_CC_GCC_USE_GRAPHITE=y +CT_CC_GCC_USE_LTO=y +CT_CC_GCC_LTO_ZSTD=m + +# +# Settings for libraries running on target +# +CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y +# CT_CC_GCC_LIBMUDFLAP is not set +CT_CC_GCC_LIBGOMP=y +# CT_CC_GCC_LIBSSP is not set +# CT_CC_GCC_LIBQUADMATH is not set +# CT_CC_GCC_LIBSANITIZER is not set + +# +# Misc. obscure options. 
+# +CT_CC_CXA_ATEXIT=y +CT_CC_GCC_TM_CLONE_REGISTRY=m +# CT_CC_GCC_DISABLE_PCH is not set +CT_CC_GCC_SJLJ_EXCEPTIONS=m +CT_CC_GCC_LDBL_128=m +# CT_CC_GCC_BUILD_ID is not set +CT_CC_GCC_LNK_HASH_STYLE_DEFAULT=y +# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set +# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set +# CT_CC_GCC_LNK_HASH_STYLE_BOTH is not set +CT_CC_GCC_LNK_HASH_STYLE="" +CT_CC_GCC_DEC_FLOATS_AUTO=y +# CT_CC_GCC_DEC_FLOATS_BID is not set +# CT_CC_GCC_DEC_FLOATS_DPD is not set +# CT_CC_GCC_DEC_FLOATS_NO is not set +CT_CC_GCC_DEC_FLOATS="" +CT_ALL_CC_CHOICES="GCC" + +# +# Additional supported languages: +# +CT_CC_LANG_CXX=y +CT_CC_LANG_FORTRAN=y +# CT_CC_LANG_ADA is not set +# CT_CC_LANG_OBJC is not set +# CT_CC_LANG_OBJCXX is not set +# CT_CC_LANG_GOLANG is not set +CT_CC_LANG_OTHERS="" +# end of C compiler + +# +# Debug facilities +# +# CT_DEBUG_DUMA is not set +CT_DEBUG_GDB=y +CT_DEBUG_GDB_PKG_KSYM="GDB" +CT_GDB_DIR_NAME="gdb" +CT_GDB_PKG_NAME="gdb" +# CT_GDB_SRC_RELEASE is not set +# CT_GDB_SRC_DEVEL is not set +CT_GDB_SRC_CUSTOM=y +CT_GDB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gdb/gdb-11.1" +CT_GDB_PATCH_GLOBAL=y +# CT_GDB_PATCH_BUNDLED is not set +# CT_GDB_PATCH_LOCAL is not set +# CT_GDB_PATCH_BUNDLED_LOCAL is not set +# CT_GDB_PATCH_LOCAL_BUNDLED is not set +# CT_GDB_PATCH_NONE is not set +CT_GDB_PATCH_ORDER="global" +# CT_GDB_VERY_NEW is not set +CT_GDB_V_11=y +# CT_GDB_V_10 is not set +# CT_GDB_V_9 is not set +# CT_GDB_V_8_3 is not set +CT_GDB_VERSION="11.2" +CT_GDB_MIRRORS="$(CT_Mirrors GNU gdb) $(CT_Mirrors sourceware gdb/releases)" +CT_GDB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GDB_SIGNATURE_FORMAT="" +CT_GDB_later_than_11=y +CT_GDB_11_or_later=y +CT_GDB_later_than_10=y +CT_GDB_10_or_later=y +CT_GDB_later_than_8_3=y +CT_GDB_8_3_or_later=y +CT_GDB_later_than_8_0=y +CT_GDB_8_0_or_later=y +CT_GDB_later_than_7_12=y +CT_GDB_7_12_or_later=y +CT_GDB_later_than_7_11=y +CT_GDB_7_11_or_later=y +CT_GDB_CROSS=y +# CT_GDB_CROSS_STATIC is not set +# CT_GDB_CROSS_SIM is not set +CT_GDB_CROSS_PYTHON=y +CT_GDB_CROSS_PYTHON_BINARY="" +CT_GDB_CROSS_EXTRA_CONFIG_ARRAY="" +# CT_GDB_NATIVE is not set +CT_GDB_GDBSERVER=y +# CT_GDB_NATIVE_BUILD_IPA_LIB is not set +# CT_GDB_NATIVE_STATIC is not set +# CT_GDB_NATIVE_STATIC_LIBSTDCXX is not set +CT_GDB_GDBSERVER_TOPLEVEL=y +# CT_DEBUG_LTRACE is not set +# CT_DEBUG_STRACE is not set +CT_ALL_DEBUG_CHOICES="DUMA GDB LTRACE STRACE" +# end of Debug facilities + +# +# Companion libraries +# +# CT_COMPLIBS_CHECK is not set +# CT_COMP_LIBS_CLOOG is not set +CT_COMP_LIBS_EXPAT=y +CT_COMP_LIBS_EXPAT_PKG_KSYM="EXPAT" +CT_EXPAT_DIR_NAME="expat" +CT_EXPAT_PKG_NAME="expat" +# CT_EXPAT_SRC_RELEASE is not set +# CT_EXPAT_SRC_DEVEL is not set +CT_EXPAT_SRC_CUSTOM=y +CT_EXPAT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/expat/expat-2.4.8" +CT_EXPAT_PATCH_GLOBAL=y +# CT_EXPAT_PATCH_BUNDLED is not set +# CT_EXPAT_PATCH_LOCAL is not set +# CT_EXPAT_PATCH_BUNDLED_LOCAL is not set +# CT_EXPAT_PATCH_LOCAL_BUNDLED is not set +# CT_EXPAT_PATCH_NONE is not set +CT_EXPAT_PATCH_ORDER="global" +CT_EXPAT_VERY_NEW=y +# CT_EXPAT_V_2_4 is not set +CT_EXPAT_VERSION="new" +CT_EXPAT_MIRRORS="http://downloads.sourceforge.net/project/expat/expat/${CT_EXPAT_VERSION} https://github.com/libexpat/libexpat/releases/download/R_${CT_EXPAT_VERSION//./_}" +CT_EXPAT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_EXPAT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" 
+CT_EXPAT_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2 .tar.gz" +CT_EXPAT_SIGNATURE_FORMAT="" +CT_COMP_LIBS_GETTEXT=y +CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT" +CT_GETTEXT_DIR_NAME="gettext" +CT_GETTEXT_PKG_NAME="gettext" +# CT_GETTEXT_SRC_RELEASE is not set +# CT_GETTEXT_SRC_DEVEL is not set +CT_GETTEXT_SRC_CUSTOM=y +CT_GETTEXT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gettext/gettext-0.21" +CT_GETTEXT_PATCH_GLOBAL=y +# CT_GETTEXT_PATCH_BUNDLED is not set +# CT_GETTEXT_PATCH_LOCAL is not set +# CT_GETTEXT_PATCH_BUNDLED_LOCAL is not set +# CT_GETTEXT_PATCH_LOCAL_BUNDLED is not set +# CT_GETTEXT_PATCH_NONE is not set +CT_GETTEXT_PATCH_ORDER="global" +# CT_GETTEXT_VERY_NEW is not set +CT_GETTEXT_V_0_21=y +# CT_GETTEXT_V_0_20_1 is not set +# CT_GETTEXT_V_0_19_8_1 is not set +CT_GETTEXT_VERSION="0.21" +CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)" +CT_GETTEXT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig" +CT_GETTEXT_0_21_or_later=y +CT_GETTEXT_0_21_or_older=y +CT_GETTEXT_INCOMPATIBLE_WITH_UCLIBC_NG=y + +# +# This version of gettext is not compatible with uClibc-NG. Select +# + +# +# a different version if uClibc-NG is used on the target or (in a +# + +# +# Canadian cross build) on the host. +# +CT_COMP_LIBS_GMP=y +CT_COMP_LIBS_GMP_PKG_KSYM="GMP" +CT_GMP_DIR_NAME="gmp" +CT_GMP_PKG_NAME="gmp" +# CT_GMP_SRC_RELEASE is not set +# CT_GMP_SRC_DEVEL is not set +CT_GMP_SRC_CUSTOM=y +CT_GMP_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gmp/gmp-6.2.1" +CT_GMP_PATCH_GLOBAL=y +# CT_GMP_PATCH_BUNDLED is not set +# CT_GMP_PATCH_LOCAL is not set +# CT_GMP_PATCH_BUNDLED_LOCAL is not set +# CT_GMP_PATCH_LOCAL_BUNDLED is not set +# CT_GMP_PATCH_NONE is not set +CT_GMP_PATCH_ORDER="global" +# CT_GMP_VERY_NEW is not set +CT_GMP_V_6_2=y +# CT_GMP_V_6_1 is not set +CT_GMP_VERSION="6.2.1" +CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)" +CT_GMP_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2" +CT_GMP_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_ISL=y +CT_COMP_LIBS_ISL_PKG_KSYM="ISL" +CT_ISL_DIR_NAME="isl" +CT_ISL_PKG_NAME="isl" +# CT_ISL_SRC_RELEASE is not set +# CT_ISL_SRC_DEVEL is not set +CT_ISL_SRC_CUSTOM=y +CT_ISL_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/isl/isl-0.16.1" +CT_ISL_PATCH_GLOBAL=y +# CT_ISL_PATCH_BUNDLED is not set +# CT_ISL_PATCH_LOCAL is not set +# CT_ISL_PATCH_BUNDLED_LOCAL is not set +# CT_ISL_PATCH_LOCAL_BUNDLED is not set +# CT_ISL_PATCH_NONE is not set +CT_ISL_PATCH_ORDER="global" +# CT_ISL_VERY_NEW is not set +# CT_ISL_V_0_24 is not set +# CT_ISL_V_0_23 is not set +# CT_ISL_V_0_22 is not set +# CT_ISL_V_0_21 is not set +# CT_ISL_V_0_20 is not set +# CT_ISL_V_0_19 is not set +# CT_ISL_V_0_18 is not set +# CT_ISL_V_0_17 is not set +CT_ISL_V_0_16=y +# CT_ISL_V_0_15 is not set +CT_ISL_VERSION="0.16.1" +CT_ISL_MIRRORS="https://libisl.sourceforge.io" +CT_ISL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_ISL_SIGNATURE_FORMAT="" +CT_ISL_0_18_or_older=y +CT_ISL_older_than_0_18=y +CT_ISL_later_than_0_15=y +CT_ISL_0_15_or_later=y +# CT_COMP_LIBS_LIBELF is not set +CT_COMP_LIBS_LIBICONV=y +CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV" 
+CT_LIBICONV_DIR_NAME="libiconv" +CT_LIBICONV_PKG_NAME="libiconv" +# CT_LIBICONV_SRC_RELEASE is not set +# CT_LIBICONV_SRC_DEVEL is not set +CT_LIBICONV_SRC_CUSTOM=y +CT_LIBICONV_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libiconv/libiconv-1.16" +CT_LIBICONV_PATCH_GLOBAL=y +# CT_LIBICONV_PATCH_BUNDLED is not set +# CT_LIBICONV_PATCH_LOCAL is not set +# CT_LIBICONV_PATCH_BUNDLED_LOCAL is not set +# CT_LIBICONV_PATCH_LOCAL_BUNDLED is not set +# CT_LIBICONV_PATCH_NONE is not set +CT_LIBICONV_PATCH_ORDER="global" +# CT_LIBICONV_VERY_NEW is not set +CT_LIBICONV_V_1_16=y +# CT_LIBICONV_V_1_15 is not set +CT_LIBICONV_VERSION="1.16" +CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)" +CT_LIBICONV_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz" +CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_MPC=y +CT_COMP_LIBS_MPC_PKG_KSYM="MPC" +CT_MPC_DIR_NAME="mpc" +CT_MPC_PKG_NAME="mpc" +# CT_MPC_SRC_RELEASE is not set +# CT_MPC_SRC_DEVEL is not set +CT_MPC_SRC_CUSTOM=y +CT_MPC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libmpc/mpc-1.2.0" +CT_MPC_PATCH_GLOBAL=y +# CT_MPC_PATCH_BUNDLED is not set +# CT_MPC_PATCH_LOCAL is not set +# CT_MPC_PATCH_BUNDLED_LOCAL is not set +# CT_MPC_PATCH_LOCAL_BUNDLED is not set +# CT_MPC_PATCH_NONE is not set +CT_MPC_PATCH_ORDER="global" +# CT_MPC_VERY_NEW is not set +CT_MPC_V_1_2=y +# CT_MPC_V_1_1 is not set +# CT_MPC_V_1_0 is not set +CT_MPC_VERSION="1.2.1" +CT_MPC_MIRRORS="http://www.multiprecision.org/downloads $(CT_Mirrors GNU mpc)" +CT_MPC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_FORMATS=".tar.gz" +CT_MPC_SIGNATURE_FORMAT="packed/.sig" +CT_MPC_later_than_1_1_0=y +CT_MPC_1_1_0_or_later=y +CT_COMP_LIBS_MPFR=y +CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR" +CT_MPFR_DIR_NAME="mpfr" +CT_MPFR_PKG_NAME="mpfr" +# CT_MPFR_SRC_RELEASE is not set +# CT_MPFR_SRC_DEVEL is not set +CT_MPFR_SRC_CUSTOM=y +CT_MPFR_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/mpfr/mpfr-4.1.0" +CT_MPFR_PATCH_GLOBAL=y +# CT_MPFR_PATCH_BUNDLED is not set +# CT_MPFR_PATCH_LOCAL is not set +# CT_MPFR_PATCH_BUNDLED_LOCAL is not set +# CT_MPFR_PATCH_LOCAL_BUNDLED is not set +# CT_MPFR_PATCH_NONE is not set +CT_MPFR_PATCH_ORDER="global" +# CT_MPFR_VERY_NEW is not set +CT_MPFR_V_4_1=y +# CT_MPFR_V_4_0 is not set +# CT_MPFR_V_3_1 is not set +CT_MPFR_VERSION="4.1.0" +CT_MPFR_MIRRORS="http://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)" +CT_MPFR_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip" +CT_MPFR_SIGNATURE_FORMAT="packed/.asc" +CT_MPFR_later_than_4_0_0=y +CT_MPFR_4_0_0_or_later=y +CT_COMP_LIBS_NCURSES=y +CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES" +CT_NCURSES_DIR_NAME="ncurses" +CT_NCURSES_PKG_NAME="ncurses" +# CT_NCURSES_SRC_RELEASE is not set +# CT_NCURSES_SRC_DEVEL is not set +CT_NCURSES_SRC_CUSTOM=y +CT_NCURSES_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/ncurses/ncurses-6.3" +CT_NCURSES_PATCH_GLOBAL=y +# CT_NCURSES_PATCH_BUNDLED is not set +# CT_NCURSES_PATCH_LOCAL is not set +# CT_NCURSES_PATCH_BUNDLED_LOCAL is not set +# CT_NCURSES_PATCH_LOCAL_BUNDLED is not set +# CT_NCURSES_PATCH_NONE is not set +CT_NCURSES_PATCH_ORDER="global" +CT_NCURSES_VERY_NEW=y +# CT_NCURSES_V_6_2 is not set +# CT_NCURSES_V_6_1 is not set +# CT_NCURSES_V_6_0 is not set 
+CT_NCURSES_VERSION="new" +CT_NCURSES_MIRRORS="https://invisible-mirror.net/archives/ncurses $(CT_Mirrors GNU ncurses)" +CT_NCURSES_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_FORMATS=".tar.gz" +CT_NCURSES_SIGNATURE_FORMAT="packed/.sig" +CT_NCURSES_NEW_ABI=y +CT_NCURSES_HOST_CONFIG_ARGS="" +CT_NCURSES_HOST_DISABLE_DB=y +CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100" +CT_NCURSES_TARGET_CONFIG_ARGS="" +# CT_NCURSES_TARGET_DISABLE_DB is not set +CT_NCURSES_TARGET_FALLBACKS="" +CT_COMP_LIBS_ZLIB=y +CT_COMP_LIBS_ZLIB_PKG_KSYM="ZLIB" +CT_ZLIB_DIR_NAME="zlib" +CT_ZLIB_PKG_NAME="zlib" +# CT_ZLIB_SRC_RELEASE is not set +# CT_ZLIB_SRC_DEVEL is not set +CT_ZLIB_SRC_CUSTOM=y +CT_ZLIB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/zlib/zlib-1.2.11" +CT_ZLIB_PATCH_GLOBAL=y +# CT_ZLIB_PATCH_BUNDLED is not set +# CT_ZLIB_PATCH_LOCAL is not set +# CT_ZLIB_PATCH_BUNDLED_LOCAL is not set +# CT_ZLIB_PATCH_LOCAL_BUNDLED is not set +# CT_ZLIB_PATCH_NONE is not set +CT_ZLIB_PATCH_ORDER="global" +# CT_ZLIB_VERY_NEW is not set +CT_ZLIB_V_1_2_12=y +CT_ZLIB_VERSION="1.2.12" +CT_ZLIB_MIRRORS="http://downloads.sourceforge.net/project/libpng/zlib/${CT_ZLIB_VERSION} https://www.zlib.net/" +CT_ZLIB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_ZLIB_SIGNATURE_FORMAT="packed/.asc" +CT_ALL_COMP_LIBS_CHOICES="CLOOG EXPAT GETTEXT GMP GNUPRUMCU ISL LIBELF LIBICONV MPC MPFR NCURSES NEWLIB_NANO PICOLIBC ZLIB" +CT_LIBICONV_NEEDED=y +CT_GETTEXT_NEEDED=y +CT_GMP_NEEDED=y +CT_MPFR_NEEDED=y +CT_ISL_NEEDED=y +CT_MPC_NEEDED=y +CT_EXPAT_NEEDED=y +CT_NCURSES_NEEDED=y +CT_ZLIB_NEEDED=y +CT_LIBICONV=y +CT_GETTEXT=y +CT_GMP=y +CT_MPFR=y +CT_ISL=y +CT_MPC=y +CT_EXPAT=y +CT_NCURSES=y +CT_ZLIB=y +# end of Companion libraries + +# +# Companion tools +# +# CT_COMP_TOOLS_FOR_HOST is not set +# CT_COMP_TOOLS_AUTOCONF is not set +# CT_COMP_TOOLS_AUTOMAKE is not set +# CT_COMP_TOOLS_BISON is not set +# CT_COMP_TOOLS_DTC is not set +# CT_COMP_TOOLS_LIBTOOL is not set +# CT_COMP_TOOLS_M4 is not set +# CT_COMP_TOOLS_MAKE is not set +CT_ALL_COMP_TOOLS_CHOICES="AUTOCONF AUTOMAKE BISON DTC LIBTOOL M4 MAKE" +# end of Companion tools + +# +# Test suite +# +# CT_TEST_SUITE_GCC is not set +# end of Test suite diff --git a/cross_tools/configs/config_arm32 b/cross_tools/configs/config_arm32 new file mode 100644 index 0000000000000000000000000000000000000000..c5e3c3a7151f9d9106972c4927323617c93ecb39 --- /dev/null +++ b/cross_tools/configs/config_arm32 @@ -0,0 +1,1061 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# crosstool-NG 1.25.0 Configuration +# +CT_CONFIGURE_has_static_link=y +CT_CONFIGURE_has_cxx11=y +CT_CONFIGURE_has_curl=y +CT_CONFIGURE_has_rsync=y +CT_CONFIGURE_has_make_3_81_or_newer=y +CT_CONFIGURE_has_make_4_0_or_newer=y +CT_CONFIGURE_has_libtool_2_4_or_newer=y +CT_CONFIGURE_has_libtoolize_2_4_or_newer=y +CT_CONFIGURE_has_autoconf_2_65_or_newer=y +CT_CONFIGURE_has_autoreconf_2_65_or_newer=y +CT_CONFIGURE_has_automake_1_15_or_newer=y +CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y +CT_CONFIGURE_has_python_3_4_or_newer=y +CT_CONFIGURE_has_bison_2_7_or_newer=y +CT_CONFIGURE_has_python=y +CT_CONFIGURE_has_git=y +CT_CONFIGURE_has_md5sum=y +CT_CONFIGURE_has_sha1sum=y +CT_CONFIGURE_has_sha256sum=y +CT_CONFIGURE_has_sha512sum=y +CT_CONFIGURE_has_install_with_strip_program=y +CT_VERSION="1.25.0" +CT_VCHECK="" +CT_CONFIG_VERSION_ENV="4" +CT_CONFIG_VERSION_CURRENT="4" +CT_CONFIG_VERSION="4" +CT_MODULES=y + +# +# Paths and misc options +# + +# +# crosstool-NG behavior +# +# CT_OBSOLETE is not set +CT_EXPERIMENTAL=y +# CT_ALLOW_BUILD_AS_ROOT is not set +# CT_DEBUG_CT is not set + +# +# Paths +# +CT_LOCAL_TARBALLS_DIR="${HOME}/src" +CT_SAVE_TARBALLS=y +# CT_TARBALLS_BUILDROOT_LAYOUT is not set +CT_WORK_DIR="${CT_TOP_DIR}/.build" +CT_BUILD_TOP_DIR="${CT_WORK_DIR:-${CT_TOP_DIR}/.build}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_BUILD_DIR="${CT_BUILD_TOP_DIR}/build" +CT_PREFIX_DIR="${CT_PREFIX:-${HOME}/x-tools}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_RM_RF_PREFIX_DIR=y +# CT_REMOVE_DOCS is not set +# CT_BUILD_MANUALS is not set +CT_INSTALL_LICENSES=y +CT_PREFIX_DIR_RO=y +CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y +# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set + +# +# Downloading +# +CT_DOWNLOAD_AGENT_CURL=y +# CT_DOWNLOAD_AGENT_NONE is not set +# CT_FORBID_DOWNLOAD is not set +# CT_FORCE_DOWNLOAD is not set +CT_CONNECT_TIMEOUT=10 +CT_DOWNLOAD_CURL_OPTIONS="--location --ftp-pasv --retry 3 --fail --silent" +# CT_ONLY_DOWNLOAD is not set +# CT_USE_MIRROR is not set +CT_VERIFY_DOWNLOAD_DIGEST=y +CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y +# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set +CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512" +# CT_VERIFY_DOWNLOAD_SIGNATURE is not set + +# +# Extracting +# +# CT_FORCE_EXTRACT is not set +CT_OVERRIDE_CONFIG_GUESS_SUB=y +# CT_ONLY_EXTRACT is not set +CT_PATCH_BUNDLED=y +# CT_PATCH_LOCAL is not set +# CT_PATCH_BUNDLED_LOCAL is not set +# CT_PATCH_LOCAL_BUNDLED is not set +# CT_PATCH_NONE is not set +CT_PATCH_ORDER="bundled" + +# +# Build behavior +# +CT_PARALLEL_JOBS=0 +CT_LOAD="" +CT_USE_PIPES=y +CT_EXTRA_CFLAGS_FOR_BUILD="" +CT_EXTRA_CXXFLAGS_FOR_BUILD="" +CT_EXTRA_LDFLAGS_FOR_BUILD="" +CT_EXTRA_CFLAGS_FOR_HOST="" +CT_EXTRA_LDFLAGS_FOR_HOST="" +# CT_CONFIG_SHELL_SH is not set +# CT_CONFIG_SHELL_ASH is not set +CT_CONFIG_SHELL_BASH=y +# CT_CONFIG_SHELL_CUSTOM is not set +CT_CONFIG_SHELL="${bash}" + +# +# Logging +# +# CT_LOG_ERROR is not set +# CT_LOG_WARN is not set +# CT_LOG_INFO is not set +CT_LOG_EXTRA=y +# CT_LOG_ALL is not set +# CT_LOG_DEBUG is not set +CT_LOG_LEVEL_MAX="EXTRA" +# CT_LOG_SEE_TOOLS_WARN is not set +CT_LOG_PROGRESS_BAR=y +CT_LOG_TO_FILE=y +CT_LOG_FILE_COMPRESS=y +# end of Paths and misc options + +# +# Target options +# +# CT_ARCH_ALPHA is not set +# CT_ARCH_ARC is not set +CT_ARCH_ARM=y +# CT_ARCH_AVR is not set +# CT_ARCH_C6X is not set +# CT_ARCH_M68K is not set +# CT_ARCH_MICROBLAZE is not set +# CT_ARCH_MIPS is not set +# CT_ARCH_MOXIE is not set +# CT_ARCH_MSP430 is 
not set +# CT_ARCH_NIOS2 is not set +# CT_ARCH_POWERPC is not set +# CT_ARCH_PRU is not set +# CT_ARCH_RISCV is not set +# CT_ARCH_S390 is not set +# CT_ARCH_SH is not set +# CT_ARCH_SPARC is not set +# CT_ARCH_X86 is not set +# CT_ARCH_XTENSA is not set +CT_ARCH="arm" +CT_ARCH_CHOICE_KSYM="ARM" +CT_ARCH_CPU="" +CT_ARCH_TUNE="" +CT_ARCH_ARM_SHOW=y + +# +# Options for arm +# +CT_ARCH_ARM_PKG_KSYM="" +CT_ARCH_ARM_MODE="arm" +CT_ARCH_ARM_MODE_ARM=y +# CT_ARCH_ARM_MODE_THUMB is not set +# CT_ARCH_ARM_INTERWORKING is not set +CT_ARCH_ARM_EABI_FORCE=y +CT_ARCH_ARM_EABI=y +CT_ALL_ARCH_CHOICES="ALPHA ARC ARM AVR C6X M68K MICROBLAZE MIPS MOXIE MSP430 NIOS2 POWERPC PRU RISCV S390 SH SPARC X86 XTENSA" +CT_ARCH_SUFFIX="" +# CT_OMIT_TARGET_VENDOR is not set + +# +# Generic target options +# +# CT_MULTILIB is not set +# CT_DEMULTILIB is not set +CT_ARCH_SUPPORTS_BOTH_MMU=y +CT_ARCH_DEFAULT_HAS_MMU=y +CT_ARCH_USE_MMU=y +CT_ARCH_SUPPORTS_FLAT_FORMAT=y +CT_ARCH_SUPPORTS_EITHER_ENDIAN=y +CT_ARCH_DEFAULT_LE=y +# CT_ARCH_BE is not set +CT_ARCH_LE=y +CT_ARCH_ENDIAN="little" +CT_ARCH_SUPPORTS_32=y +CT_ARCH_SUPPORTS_64=y +CT_ARCH_DEFAULT_32=y +CT_ARCH_BITNESS=32 +CT_ARCH_32=y +# CT_ARCH_64 is not set + +# +# Target optimisations +# +CT_ARCH_SUPPORTS_WITH_ARCH=y +CT_ARCH_SUPPORTS_WITH_CPU=y +CT_ARCH_SUPPORTS_WITH_TUNE=y +CT_ARCH_SUPPORTS_WITH_FLOAT=y +CT_ARCH_SUPPORTS_WITH_FPU=y +CT_ARCH_SUPPORTS_SOFTFP=y +CT_ARCH_EXCLUSIVE_WITH_CPU=y +CT_ARCH_ARCH="" +CT_ARCH_FPU="" +# CT_ARCH_FLOAT_AUTO is not set +# CT_ARCH_FLOAT_HW is not set +# CT_ARCH_FLOAT_SOFTFP is not set +CT_ARCH_FLOAT_SW=y +CT_TARGET_CFLAGS="" +CT_TARGET_LDFLAGS="" +CT_ARCH_FLOAT="soft" +# end of Target options + +# +# Toolchain options +# + +# +# General toolchain options +# +CT_FORCE_SYSROOT=y +CT_USE_SYSROOT=y +CT_SYSROOT_NAME="sysroot" +CT_SYSROOT_DIR_PREFIX="" +CT_WANTS_STATIC_LINK=y +CT_WANTS_STATIC_LINK_CXX=y +# CT_STATIC_TOOLCHAIN is not set +CT_SHOW_CT_VERSION=y +CT_TOOLCHAIN_PKGVERSION="" +CT_TOOLCHAIN_BUGURL="" + +# +# Tuple completion and aliasing +# +CT_TARGET_VENDOR="openeuler" +CT_TARGET_ALIAS_SED_EXPR="" +CT_TARGET_ALIAS="" + +# +# Toolchain type +# +# CT_NATIVE is not set +CT_CROSS=y +# CT_CROSS_NATIVE is not set +# CT_CANADIAN is not set +CT_TOOLCHAIN_TYPE="cross" + +# +# Build system +# +CT_BUILD="" +CT_BUILD_PREFIX="" +CT_BUILD_SUFFIX="" + +# +# Misc options +# +# CT_TOOLCHAIN_ENABLE_NLS is not set +# end of Toolchain options + +# +# Operating System +# +CT_KERNEL_SUPPORTS_SHARED_LIBS=y +# CT_KERNEL_BARE_METAL is not set +CT_KERNEL_LINUX=y +CT_KERNEL="linux" +CT_KERNEL_CHOICE_KSYM="LINUX" +CT_KERNEL_LINUX_SHOW=y + +# +# Options for linux +# +CT_KERNEL_LINUX_PKG_KSYM="LINUX" +CT_LINUX_DIR_NAME="linux" +CT_LINUX_USE_WWW_KERNEL_ORG=y +# CT_LINUX_USE_ORACLE is not set +CT_LINUX_USE="LINUX" +CT_LINUX_PKG_NAME="linux" +# CT_LINUX_SRC_RELEASE is not set +# CT_LINUX_SRC_DEVEL is not set +CT_LINUX_SRC_CUSTOM=y +CT_LINUX_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/kernel" +CT_LINUX_PATCH_GLOBAL=y +# CT_LINUX_PATCH_BUNDLED is not set +# CT_LINUX_PATCH_LOCAL is not set +# CT_LINUX_PATCH_BUNDLED_LOCAL is not set +# CT_LINUX_PATCH_LOCAL_BUNDLED is not set +# CT_LINUX_PATCH_NONE is not set +CT_LINUX_PATCH_ORDER="global" +# CT_LINUX_VERY_NEW is not set +# CT_LINUX_V_5_16 is not set +# CT_LINUX_V_5_15 is not set +# CT_LINUX_V_5_14 is not set +# CT_LINUX_V_5_13 is not set +# CT_LINUX_V_5_12 is not set +# CT_LINUX_V_5_11 is not set +CT_LINUX_V_5_10=y +# CT_LINUX_V_5_9 is not set +# CT_LINUX_V_5_8 is not set +# CT_LINUX_V_5_7 is 
not set +# CT_LINUX_V_5_4 is not set +# CT_LINUX_V_5_3 is not set +# CT_LINUX_V_5_2 is not set +# CT_LINUX_V_5_1 is not set +# CT_LINUX_V_5_0 is not set +# CT_LINUX_V_4_20 is not set +# CT_LINUX_V_4_19 is not set +# CT_LINUX_V_4_18 is not set +# CT_LINUX_V_4_17 is not set +# CT_LINUX_V_4_16 is not set +# CT_LINUX_V_4_15 is not set +# CT_LINUX_V_4_14 is not set +# CT_LINUX_V_4_13 is not set +# CT_LINUX_V_4_12 is not set +# CT_LINUX_V_4_11 is not set +# CT_LINUX_V_4_10 is not set +# CT_LINUX_V_4_9 is not set +# CT_LINUX_V_4_4 is not set +# CT_LINUX_V_4_1 is not set +# CT_LINUX_V_3_16 is not set +# CT_LINUX_V_3_13 is not set +# CT_LINUX_V_3_12 is not set +# CT_LINUX_V_3_10 is not set +# CT_LINUX_V_3_4 is not set +# CT_LINUX_V_3_2 is not set +CT_LINUX_VERSION="5.10.100" +CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})" +CT_LINUX_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign" +CT_LINUX_5_12_or_older=y +CT_LINUX_older_than_5_12=y +CT_LINUX_later_than_5_3=y +CT_LINUX_5_3_or_later=y +CT_LINUX_later_than_4_8=y +CT_LINUX_4_8_or_later=y +CT_LINUX_later_than_3_7=y +CT_LINUX_3_7_or_later=y +CT_LINUX_later_than_3_2=y +CT_LINUX_3_2_or_later=y +CT_LINUX_REQUIRE_3_2_or_later=y +CT_KERNEL_DEP_RSYNC=y +CT_KERNEL_LINUX_VERBOSITY_0=y +# CT_KERNEL_LINUX_VERBOSITY_1 is not set +# CT_KERNEL_LINUX_VERBOSITY_2 is not set +CT_KERNEL_LINUX_VERBOSE_LEVEL=0 +CT_KERNEL_LINUX_INSTALL_CHECK=y +CT_ALL_KERNEL_CHOICES="BARE_METAL LINUX WINDOWS" + +# +# Common kernel options +# +CT_SHARED_LIBS=y +# end of Operating System + +# +# Binary utilities +# +CT_ARCH_BINFMT_ELF=y +CT_BINUTILS_BINUTILS=y +CT_BINUTILS="binutils" +CT_BINUTILS_CHOICE_KSYM="BINUTILS" +CT_BINUTILS_BINUTILS_SHOW=y + +# +# Options for binutils +# +CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS" +CT_BINUTILS_DIR_NAME="binutils" +CT_BINUTILS_USE_GNU=y +# CT_BINUTILS_USE_LINARO is not set +# CT_BINUTILS_USE_ORACLE is not set +CT_BINUTILS_USE="BINUTILS" +CT_BINUTILS_PKG_NAME="binutils" +# CT_BINUTILS_SRC_RELEASE is not set +# CT_BINUTILS_SRC_DEVEL is not set +CT_BINUTILS_SRC_CUSTOM=y +CT_BINUTILS_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/binutils/binutils-2.37" +CT_BINUTILS_PATCH_GLOBAL=y +# CT_BINUTILS_PATCH_BUNDLED is not set +# CT_BINUTILS_PATCH_LOCAL is not set +# CT_BINUTILS_PATCH_BUNDLED_LOCAL is not set +# CT_BINUTILS_PATCH_LOCAL_BUNDLED is not set +# CT_BINUTILS_PATCH_NONE is not set +CT_BINUTILS_PATCH_ORDER="global" +# CT_BINUTILS_VERY_NEW is not set +CT_BINUTILS_V_2_38=y +# CT_BINUTILS_V_2_37 is not set +# CT_BINUTILS_V_2_36 is not set +# CT_BINUTILS_V_2_35 is not set +# CT_BINUTILS_V_2_34 is not set +# CT_BINUTILS_V_2_33 is not set +# CT_BINUTILS_V_2_32 is not set +# CT_BINUTILS_V_2_31 is not set +# CT_BINUTILS_V_2_30 is not set +# CT_BINUTILS_V_2_29 is not set +# CT_BINUTILS_V_2_28 is not set +# CT_BINUTILS_V_2_27 is not set +# CT_BINUTILS_V_2_26 is not set +CT_BINUTILS_VERSION="2.38" +CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)" +CT_BINUTILS_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig" +CT_BINUTILS_later_than_2_30=y +CT_BINUTILS_2_30_or_later=y +CT_BINUTILS_later_than_2_27=y +CT_BINUTILS_2_27_or_later=y +CT_BINUTILS_later_than_2_26=y +CT_BINUTILS_2_26_or_later=y + +# +# GNU binutils +# 
+CT_BINUTILS_GOLD_SUPPORTS_ARCH=y +CT_BINUTILS_GOLD_SUPPORT=y +CT_BINUTILS_FORCE_LD_BFD_DEFAULT=y +# CT_BINUTILS_LINKER_LD is not set +CT_BINUTILS_LINKER_LD_GOLD=y +CT_BINUTILS_GOLD_INSTALLED=y +CT_BINUTILS_GOLD_THREADS=y +CT_BINUTILS_LINKER_BOTH=y +CT_BINUTILS_LINKERS_LIST="ld,gold" +CT_BINUTILS_LD_WRAPPER=y +CT_BINUTILS_LINKER_DEFAULT="bfd" +CT_BINUTILS_PLUGINS=y +CT_BINUTILS_RELRO=m +CT_BINUTILS_DETERMINISTIC_ARCHIVES=y +CT_BINUTILS_EXTRA_CONFIG_ARRAY="" +# CT_BINUTILS_FOR_TARGET is not set +CT_ALL_BINUTILS_CHOICES="BINUTILS" +# end of Binary utilities + +# +# C-library +# +CT_LIBC_GLIBC=y +# CT_LIBC_MUSL is not set +# CT_LIBC_UCLIBC_NG is not set +CT_LIBC="glibc" +CT_LIBC_CHOICE_KSYM="GLIBC" +CT_THREADS="nptl" +CT_LIBC_GLIBC_SHOW=y + +# +# Options for glibc +# +CT_LIBC_GLIBC_PKG_KSYM="GLIBC" +CT_GLIBC_DIR_NAME="glibc" +CT_GLIBC_USE_GNU=y +# CT_GLIBC_USE_ORACLE is not set +CT_GLIBC_USE="GLIBC" +CT_GLIBC_PKG_NAME="glibc" +# CT_GLIBC_SRC_RELEASE is not set +# CT_GLIBC_SRC_DEVEL is not set +CT_GLIBC_SRC_CUSTOM=y +CT_GLIBC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/glibc/glibc-2.36" +CT_GLIBC_PATCH_GLOBAL=y +# CT_GLIBC_PATCH_BUNDLED is not set +# CT_GLIBC_PATCH_LOCAL is not set +# CT_GLIBC_PATCH_BUNDLED_LOCAL is not set +# CT_GLIBC_PATCH_LOCAL_BUNDLED is not set +# CT_GLIBC_PATCH_NONE is not set +CT_GLIBC_PATCH_ORDER="global" +# CT_GLIBC_VERY_NEW is not set +CT_GLIBC_V_2_35=y +# CT_GLIBC_V_2_34 is not set +# CT_GLIBC_V_2_33 is not set +# CT_GLIBC_V_2_32 is not set +# CT_GLIBC_V_2_31 is not set +# CT_GLIBC_V_2_30 is not set +# CT_GLIBC_V_2_29 is not set +# CT_GLIBC_V_2_28 is not set +# CT_GLIBC_V_2_27 is not set +# CT_GLIBC_V_2_26 is not set +# CT_GLIBC_V_2_25 is not set +# CT_GLIBC_V_2_24 is not set +# CT_GLIBC_V_2_23 is not set +# CT_GLIBC_V_2_19 is not set +# CT_GLIBC_V_2_17 is not set +CT_GLIBC_VERSION="2.35" +CT_GLIBC_MIRRORS="$(CT_Mirrors GNU glibc)" +CT_GLIBC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GLIBC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GLIBC_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_GLIBC_SIGNATURE_FORMAT="packed/.sig" +CT_GLIBC_later_than_2_34=y +CT_GLIBC_2_34_or_later=y +CT_GLIBC_later_than_2_32=y +CT_GLIBC_2_32_or_later=y +CT_GLIBC_later_than_2_31=y +CT_GLIBC_2_31_or_later=y +CT_GLIBC_later_than_2_30=y +CT_GLIBC_2_30_or_later=y +CT_GLIBC_later_than_2_29=y +CT_GLIBC_2_29_or_later=y +CT_GLIBC_later_than_2_28=y +CT_GLIBC_2_28_or_later=y +CT_GLIBC_later_than_2_27=y +CT_GLIBC_2_27_or_later=y +CT_GLIBC_later_than_2_26=y +CT_GLIBC_2_26_or_later=y +CT_GLIBC_later_than_2_25=y +CT_GLIBC_2_25_or_later=y +CT_GLIBC_later_than_2_24=y +CT_GLIBC_2_24_or_later=y +CT_GLIBC_later_than_2_23=y +CT_GLIBC_2_23_or_later=y +CT_GLIBC_later_than_2_20=y +CT_GLIBC_2_20_or_later=y +CT_GLIBC_later_than_2_17=y +CT_GLIBC_2_17_or_later=y +CT_GLIBC_later_than_2_14=y +CT_GLIBC_2_14_or_later=y +CT_GLIBC_DEP_KERNEL_HEADERS_VERSION=y +CT_GLIBC_DEP_BINUTILS=y +CT_GLIBC_DEP_GCC=y +CT_GLIBC_DEP_PYTHON=y +CT_GLIBC_BUILD_SSP=y +CT_GLIBC_HAS_LIBIDN_ADDON=y +# CT_GLIBC_USE_LIBIDN_ADDON is not set +CT_GLIBC_NO_SPARC_V8=y +CT_GLIBC_EXTRA_CONFIG_ARRAY="--enable-crypt" +CT_GLIBC_CONFIGPARMS="" +CT_GLIBC_EXTRA_CFLAGS="" +# CT_GLIBC_ENABLE_FORTIFIED_BUILD is not set +# CT_GLIBC_DISABLE_VERSIONING is not set +CT_GLIBC_OLDEST_ABI="" +CT_GLIBC_FORCE_UNWIND=y +# CT_GLIBC_LOCALES is not set +# CT_GLIBC_KERNEL_VERSION_NONE is not set +CT_GLIBC_KERNEL_VERSION_AS_HEADERS=y +# CT_GLIBC_KERNEL_VERSION_CHOSEN is not set +CT_GLIBC_MIN_KERNEL="5.10.100" +CT_GLIBC_SSP_DEFAULT=y +# CT_GLIBC_SSP_NO 
is not set +# CT_GLIBC_SSP_YES is not set +# CT_GLIBC_SSP_ALL is not set +# CT_GLIBC_SSP_STRONG is not set +CT_GLIBC_ENABLE_WERROR=y +# CT_GLIBC_ENABLE_COMMON_FLAG is not set +CT_ALL_LIBC_CHOICES="AVR_LIBC BIONIC GLIBC MINGW_W64 MOXIEBOX MUSL NEWLIB NONE UCLIBC_NG" +CT_LIBC_SUPPORT_THREADS_ANY=y +CT_LIBC_SUPPORT_THREADS_NATIVE=y + +# +# Common C library options +# +CT_THREADS_NATIVE=y +# CT_CREATE_LDSO_CONF is not set +CT_LIBC_XLDD=y +# end of C-library + +# +# C compiler +# +CT_CC_CORE_NEEDED=y +CT_CC_SUPPORT_CXX=y +CT_CC_SUPPORT_FORTRAN=y +CT_CC_SUPPORT_ADA=y +CT_CC_SUPPORT_OBJC=y +CT_CC_SUPPORT_OBJCXX=y +CT_CC_SUPPORT_GOLANG=y +CT_CC_GCC=y +CT_CC="gcc" +CT_CC_CHOICE_KSYM="GCC" +CT_CC_GCC_SHOW=y + +# +# Options for gcc +# +CT_CC_GCC_PKG_KSYM="GCC" +CT_GCC_DIR_NAME="gcc" +CT_GCC_USE_GNU=y +# CT_GCC_USE_LINARO is not set +CT_GCC_USE="GCC" +CT_GCC_PKG_NAME="gcc" +# CT_GCC_SRC_RELEASE is not set +# CT_GCC_SRC_DEVEL is not set +CT_GCC_SRC_CUSTOM=y +CT_GCC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gcc/gcc-10.3.0" +CT_GCC_PATCH_GLOBAL=y +# CT_GCC_PATCH_BUNDLED is not set +# CT_GCC_PATCH_LOCAL is not set +# CT_GCC_PATCH_BUNDLED_LOCAL is not set +# CT_GCC_PATCH_LOCAL_BUNDLED is not set +# CT_GCC_PATCH_NONE is not set +CT_GCC_PATCH_ORDER="global" +# CT_GCC_VERY_NEW is not set +# CT_GCC_V_11 is not set +CT_GCC_V_10=y +# CT_GCC_V_9 is not set +# CT_GCC_V_8 is not set +# CT_GCC_V_7 is not set +# CT_GCC_V_6 is not set +CT_GCC_VERSION="10.3.0" +CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware gcc/releases/gcc-${CT_GCC_VERSION})" +CT_GCC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GCC_SIGNATURE_FORMAT="" +CT_GCC_11_or_older=y +CT_GCC_older_than_11=y +CT_GCC_later_than_10=y +CT_GCC_10_or_later=y +CT_GCC_later_than_9=y +CT_GCC_9_or_later=y +CT_GCC_later_than_8=y +CT_GCC_8_or_later=y +CT_GCC_later_than_7=y +CT_GCC_7_or_later=y +CT_GCC_later_than_6=y +CT_GCC_6_or_later=y +CT_GCC_REQUIRE_6_or_later=y +CT_GCC_later_than_5=y +CT_GCC_5_or_later=y +CT_GCC_REQUIRE_5_or_later=y +CT_GCC_later_than_4_9=y +CT_GCC_4_9_or_later=y +CT_GCC_REQUIRE_4_9_or_later=y +CT_CC_GCC_ENABLE_PLUGINS=y +CT_CC_GCC_GOLD=y +CT_CC_GCC_HAS_LIBMPX=y +CT_CC_GCC_ENABLE_CXX_FLAGS="" +CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" +CT_CC_GCC_EXTRA_CONFIG_ARRAY="--with-arch=armv7-a --with-gnu-as --with-gnu-ld --enable-c99 --enable-shared --enable-poison-system-directories --enable-symvers=gnu --disable-bootstrap --disable-libstdcxx-dual-abi --enable-default-pie --with-build-time-tools=\\\"${CT_PREFIX_DIR}/${CT_TARGET}/bin\\\"" +CT_CC_GCC_STATIC_LIBSTDCXX=y +# CT_CC_GCC_SYSTEM_ZLIB is not set +CT_CC_GCC_CONFIG_TLS=m + +# +# Optimisation features +# +CT_CC_GCC_USE_GRAPHITE=y +CT_CC_GCC_USE_LTO=y +CT_CC_GCC_LTO_ZSTD=m + +# +# Settings for libraries running on target +# +CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y +# CT_CC_GCC_LIBMUDFLAP is not set +# CT_CC_GCC_LIBGOMP is not set +# CT_CC_GCC_LIBSSP is not set +# CT_CC_GCC_LIBQUADMATH is not set +# CT_CC_GCC_LIBSANITIZER is not set + +# +# Misc. obscure options. 
+# +CT_CC_CXA_ATEXIT=y +CT_CC_GCC_TM_CLONE_REGISTRY=m +# CT_CC_GCC_DISABLE_PCH is not set +# CT_CC_GCC_SJLJ_EXCEPTIONS is not set +CT_CC_GCC_LDBL_128=m +# CT_CC_GCC_BUILD_ID is not set +CT_CC_GCC_LNK_HASH_STYLE_DEFAULT=y +# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set +# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set +# CT_CC_GCC_LNK_HASH_STYLE_BOTH is not set +CT_CC_GCC_LNK_HASH_STYLE="" +CT_CC_GCC_DEC_FLOATS_AUTO=y +# CT_CC_GCC_DEC_FLOATS_BID is not set +# CT_CC_GCC_DEC_FLOATS_DPD is not set +# CT_CC_GCC_DEC_FLOATS_NO is not set +CT_CC_GCC_DEC_FLOATS="" +CT_ALL_CC_CHOICES="GCC" + +# +# Additional supported languages: +# +CT_CC_LANG_CXX=y +CT_CC_LANG_FORTRAN=y +# CT_CC_LANG_ADA is not set +# CT_CC_LANG_OBJC is not set +# CT_CC_LANG_OBJCXX is not set +# CT_CC_LANG_GOLANG is not set +CT_CC_LANG_OTHERS="" +# end of C compiler + +# +# Debug facilities +# +# CT_DEBUG_DUMA is not set +CT_DEBUG_GDB=y +CT_DEBUG_GDB_PKG_KSYM="GDB" +CT_GDB_DIR_NAME="gdb" +CT_GDB_PKG_NAME="gdb" +# CT_GDB_SRC_RELEASE is not set +# CT_GDB_SRC_DEVEL is not set +CT_GDB_SRC_CUSTOM=y +CT_GDB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gdb/gdb-12.1" +CT_GDB_PATCH_GLOBAL=y +# CT_GDB_PATCH_BUNDLED is not set +# CT_GDB_PATCH_LOCAL is not set +# CT_GDB_PATCH_BUNDLED_LOCAL is not set +# CT_GDB_PATCH_LOCAL_BUNDLED is not set +# CT_GDB_PATCH_NONE is not set +CT_GDB_PATCH_ORDER="global" +# CT_GDB_VERY_NEW is not set +CT_GDB_V_11=y +# CT_GDB_V_10 is not set +# CT_GDB_V_9 is not set +# CT_GDB_V_8_3 is not set +CT_GDB_VERSION="11.2" +CT_GDB_MIRRORS="$(CT_Mirrors GNU gdb) $(CT_Mirrors sourceware gdb/releases)" +CT_GDB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GDB_SIGNATURE_FORMAT="" +CT_GDB_later_than_11=y +CT_GDB_11_or_later=y +CT_GDB_later_than_10=y +CT_GDB_10_or_later=y +CT_GDB_later_than_8_3=y +CT_GDB_8_3_or_later=y +CT_GDB_later_than_8_0=y +CT_GDB_8_0_or_later=y +CT_GDB_later_than_7_12=y +CT_GDB_7_12_or_later=y +CT_GDB_later_than_7_11=y +CT_GDB_7_11_or_later=y +CT_GDB_CROSS=y +# CT_GDB_CROSS_STATIC is not set +# CT_GDB_CROSS_SIM is not set +# CT_GDB_CROSS_PYTHON is not set +CT_GDB_CROSS_EXTRA_CONFIG_ARRAY="" +# CT_GDB_NATIVE is not set +CT_GDB_GDBSERVER=y +# CT_GDB_NATIVE_BUILD_IPA_LIB is not set +# CT_GDB_NATIVE_STATIC is not set +# CT_GDB_NATIVE_STATIC_LIBSTDCXX is not set +CT_GDB_GDBSERVER_TOPLEVEL=y +# CT_DEBUG_LTRACE is not set +# CT_DEBUG_STRACE is not set +CT_ALL_DEBUG_CHOICES="DUMA GDB LTRACE STRACE" +# end of Debug facilities + +# +# Companion libraries +# +# CT_COMPLIBS_CHECK is not set +# CT_COMP_LIBS_CLOOG is not set +CT_COMP_LIBS_EXPAT=y +CT_COMP_LIBS_EXPAT_PKG_KSYM="EXPAT" +CT_EXPAT_DIR_NAME="expat" +CT_EXPAT_PKG_NAME="expat" +# CT_EXPAT_SRC_RELEASE is not set +# CT_EXPAT_SRC_DEVEL is not set +CT_EXPAT_SRC_CUSTOM=y +CT_EXPAT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/expat/expat-2.5.0" +CT_EXPAT_PATCH_GLOBAL=y +# CT_EXPAT_PATCH_BUNDLED is not set +# CT_EXPAT_PATCH_LOCAL is not set +# CT_EXPAT_PATCH_BUNDLED_LOCAL is not set +# CT_EXPAT_PATCH_LOCAL_BUNDLED is not set +# CT_EXPAT_PATCH_NONE is not set +CT_EXPAT_PATCH_ORDER="global" +CT_EXPAT_VERY_NEW=y +# CT_EXPAT_V_2_4 is not set +CT_EXPAT_VERSION="new" +CT_EXPAT_MIRRORS="http://downloads.sourceforge.net/project/expat/expat/${CT_EXPAT_VERSION} https://github.com/libexpat/libexpat/releases/download/R_${CT_EXPAT_VERSION//./_}" +CT_EXPAT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_EXPAT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" 
+CT_EXPAT_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2 .tar.gz" +CT_EXPAT_SIGNATURE_FORMAT="" +CT_COMP_LIBS_GETTEXT=y +CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT" +CT_GETTEXT_DIR_NAME="gettext" +CT_GETTEXT_PKG_NAME="gettext" +# CT_GETTEXT_SRC_RELEASE is not set +# CT_GETTEXT_SRC_DEVEL is not set +CT_GETTEXT_SRC_CUSTOM=y +CT_GETTEXT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gettext/gettext-0.21" +CT_GETTEXT_PATCH_GLOBAL=y +# CT_GETTEXT_PATCH_BUNDLED is not set +# CT_GETTEXT_PATCH_LOCAL is not set +# CT_GETTEXT_PATCH_BUNDLED_LOCAL is not set +# CT_GETTEXT_PATCH_LOCAL_BUNDLED is not set +# CT_GETTEXT_PATCH_NONE is not set +CT_GETTEXT_PATCH_ORDER="global" +# CT_GETTEXT_VERY_NEW is not set +CT_GETTEXT_V_0_21=y +# CT_GETTEXT_V_0_20_1 is not set +# CT_GETTEXT_V_0_19_8_1 is not set +CT_GETTEXT_VERSION="0.21" +CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)" +CT_GETTEXT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig" +CT_GETTEXT_0_21_or_later=y +CT_GETTEXT_0_21_or_older=y +CT_GETTEXT_INCOMPATIBLE_WITH_UCLIBC_NG=y + +# +# This version of gettext is not compatible with uClibc-NG. Select +# + +# +# a different version if uClibc-NG is used on the target or (in a +# + +# +# Canadian cross build) on the host. +# +CT_COMP_LIBS_GMP=y +CT_COMP_LIBS_GMP_PKG_KSYM="GMP" +CT_GMP_DIR_NAME="gmp" +CT_GMP_PKG_NAME="gmp" +# CT_GMP_SRC_RELEASE is not set +# CT_GMP_SRC_DEVEL is not set +CT_GMP_SRC_CUSTOM=y +CT_GMP_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gmp/gmp-6.2.1" +CT_GMP_PATCH_GLOBAL=y +# CT_GMP_PATCH_BUNDLED is not set +# CT_GMP_PATCH_LOCAL is not set +# CT_GMP_PATCH_BUNDLED_LOCAL is not set +# CT_GMP_PATCH_LOCAL_BUNDLED is not set +# CT_GMP_PATCH_NONE is not set +CT_GMP_PATCH_ORDER="global" +# CT_GMP_VERY_NEW is not set +CT_GMP_V_6_2=y +# CT_GMP_V_6_1 is not set +CT_GMP_VERSION="6.2.1" +CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)" +CT_GMP_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2" +CT_GMP_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_ISL=y +CT_COMP_LIBS_ISL_PKG_KSYM="ISL" +CT_ISL_DIR_NAME="isl" +CT_ISL_PKG_NAME="isl" +# CT_ISL_SRC_RELEASE is not set +# CT_ISL_SRC_DEVEL is not set +CT_ISL_SRC_CUSTOM=y +CT_ISL_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/isl/isl-0.24" +CT_ISL_PATCH_GLOBAL=y +# CT_ISL_PATCH_BUNDLED is not set +# CT_ISL_PATCH_LOCAL is not set +# CT_ISL_PATCH_BUNDLED_LOCAL is not set +# CT_ISL_PATCH_LOCAL_BUNDLED is not set +# CT_ISL_PATCH_NONE is not set +CT_ISL_PATCH_ORDER="global" +# CT_ISL_VERY_NEW is not set +# CT_ISL_V_0_24 is not set +# CT_ISL_V_0_23 is not set +# CT_ISL_V_0_22 is not set +# CT_ISL_V_0_21 is not set +# CT_ISL_V_0_20 is not set +# CT_ISL_V_0_19 is not set +# CT_ISL_V_0_18 is not set +# CT_ISL_V_0_17 is not set +CT_ISL_V_0_16=y +# CT_ISL_V_0_15 is not set +CT_ISL_VERSION="0.16.1" +CT_ISL_MIRRORS="https://libisl.sourceforge.io" +CT_ISL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_ISL_SIGNATURE_FORMAT="" +CT_ISL_0_18_or_older=y +CT_ISL_older_than_0_18=y +CT_ISL_later_than_0_15=y +CT_ISL_0_15_or_later=y +CT_COMP_LIBS_LIBELF=y +CT_COMP_LIBS_LIBELF_PKG_KSYM="LIBELF" +CT_LIBELF_DIR_NAME="libelf" +CT_LIBELF_PKG_NAME="libelf" 
+CT_LIBELF_SRC_RELEASE=y +# CT_LIBELF_SRC_DEVEL is not set +# CT_LIBELF_SRC_CUSTOM is not set +CT_LIBELF_PATCH_GLOBAL=y +# CT_LIBELF_PATCH_BUNDLED is not set +# CT_LIBELF_PATCH_LOCAL is not set +# CT_LIBELF_PATCH_BUNDLED_LOCAL is not set +# CT_LIBELF_PATCH_LOCAL_BUNDLED is not set +# CT_LIBELF_PATCH_NONE is not set +CT_LIBELF_PATCH_ORDER="global" +CT_LIBELF_V_0_8=y +CT_LIBELF_VERSION="0.8.13" +CT_LIBELF_MIRRORS="https://fossies.org/linux/misc/old http://oe-lite.org/mirror/libelf/ http://ftp.osuosl.org/pub/blfs/conglomeration/libelf/" +CT_LIBELF_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LIBELF_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LIBELF_ARCHIVE_FORMATS=".tar.gz" +CT_LIBELF_SIGNATURE_FORMAT="" +CT_COMP_LIBS_LIBICONV=y +CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV" +CT_LIBICONV_DIR_NAME="libiconv" +CT_LIBICONV_PKG_NAME="libiconv" +# CT_LIBICONV_SRC_RELEASE is not set +# CT_LIBICONV_SRC_DEVEL is not set +CT_LIBICONV_SRC_CUSTOM=y +CT_LIBICONV_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libiconv/libiconv-1.16" +CT_LIBICONV_PATCH_GLOBAL=y +# CT_LIBICONV_PATCH_BUNDLED is not set +# CT_LIBICONV_PATCH_LOCAL is not set +# CT_LIBICONV_PATCH_BUNDLED_LOCAL is not set +# CT_LIBICONV_PATCH_LOCAL_BUNDLED is not set +# CT_LIBICONV_PATCH_NONE is not set +CT_LIBICONV_PATCH_ORDER="global" +# CT_LIBICONV_VERY_NEW is not set +CT_LIBICONV_V_1_16=y +# CT_LIBICONV_V_1_15 is not set +CT_LIBICONV_VERSION="1.16" +CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)" +CT_LIBICONV_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz" +CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_MPC=y +CT_COMP_LIBS_MPC_PKG_KSYM="MPC" +CT_MPC_DIR_NAME="mpc" +CT_MPC_PKG_NAME="mpc" +# CT_MPC_SRC_RELEASE is not set +# CT_MPC_SRC_DEVEL is not set +CT_MPC_SRC_CUSTOM=y +CT_MPC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libmpc/mpc-1.3.1" +CT_MPC_PATCH_GLOBAL=y +# CT_MPC_PATCH_BUNDLED is not set +# CT_MPC_PATCH_LOCAL is not set +# CT_MPC_PATCH_BUNDLED_LOCAL is not set +# CT_MPC_PATCH_LOCAL_BUNDLED is not set +# CT_MPC_PATCH_NONE is not set +CT_MPC_PATCH_ORDER="global" +# CT_MPC_VERY_NEW is not set +CT_MPC_V_1_2=y +# CT_MPC_V_1_1 is not set +# CT_MPC_V_1_0 is not set +CT_MPC_VERSION="1.2.1" +CT_MPC_MIRRORS="http://www.multiprecision.org/downloads $(CT_Mirrors GNU mpc)" +CT_MPC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_FORMATS=".tar.gz" +CT_MPC_SIGNATURE_FORMAT="packed/.sig" +CT_MPC_later_than_1_1_0=y +CT_MPC_1_1_0_or_later=y +CT_COMP_LIBS_MPFR=y +CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR" +CT_MPFR_DIR_NAME="mpfr" +CT_MPFR_PKG_NAME="mpfr" +# CT_MPFR_SRC_RELEASE is not set +# CT_MPFR_SRC_DEVEL is not set +CT_MPFR_SRC_CUSTOM=y +CT_MPFR_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/mpfr/mpfr-4.1.0" +CT_MPFR_PATCH_GLOBAL=y +# CT_MPFR_PATCH_BUNDLED is not set +# CT_MPFR_PATCH_LOCAL is not set +# CT_MPFR_PATCH_BUNDLED_LOCAL is not set +# CT_MPFR_PATCH_LOCAL_BUNDLED is not set +# CT_MPFR_PATCH_NONE is not set +CT_MPFR_PATCH_ORDER="global" +# CT_MPFR_VERY_NEW is not set +CT_MPFR_V_4_1=y +# CT_MPFR_V_4_0 is not set +# CT_MPFR_V_3_1 is not set +CT_MPFR_VERSION="4.1.0" +CT_MPFR_MIRRORS="http://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)" +CT_MPFR_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip" 
+CT_MPFR_SIGNATURE_FORMAT="packed/.asc" +CT_MPFR_later_than_4_0_0=y +CT_MPFR_4_0_0_or_later=y +CT_COMP_LIBS_NCURSES=y +CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES" +CT_NCURSES_DIR_NAME="ncurses" +CT_NCURSES_PKG_NAME="ncurses" +# CT_NCURSES_SRC_RELEASE is not set +# CT_NCURSES_SRC_DEVEL is not set +CT_NCURSES_SRC_CUSTOM=y +CT_NCURSES_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/ncurses/ncurses-6.4" +CT_NCURSES_PATCH_GLOBAL=y +# CT_NCURSES_PATCH_BUNDLED is not set +# CT_NCURSES_PATCH_LOCAL is not set +# CT_NCURSES_PATCH_BUNDLED_LOCAL is not set +# CT_NCURSES_PATCH_LOCAL_BUNDLED is not set +# CT_NCURSES_PATCH_NONE is not set +CT_NCURSES_PATCH_ORDER="global" +# CT_NCURSES_VERY_NEW is not set +CT_NCURSES_V_6_2=y +# CT_NCURSES_V_6_1 is not set +# CT_NCURSES_V_6_0 is not set +CT_NCURSES_VERSION="6.2" +CT_NCURSES_MIRRORS="https://invisible-mirror.net/archives/ncurses $(CT_Mirrors GNU ncurses)" +CT_NCURSES_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_FORMATS=".tar.gz" +CT_NCURSES_SIGNATURE_FORMAT="packed/.sig" +CT_NCURSES_NEW_ABI=y +CT_NCURSES_HOST_CONFIG_ARGS="" +CT_NCURSES_HOST_DISABLE_DB=y +CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100" +CT_NCURSES_TARGET_CONFIG_ARGS="" +# CT_NCURSES_TARGET_DISABLE_DB is not set +CT_NCURSES_TARGET_FALLBACKS="" +CT_COMP_LIBS_ZLIB=y +CT_COMP_LIBS_ZLIB_PKG_KSYM="ZLIB" +CT_ZLIB_DIR_NAME="zlib" +CT_ZLIB_PKG_NAME="zlib" +# CT_ZLIB_SRC_RELEASE is not set +# CT_ZLIB_SRC_DEVEL is not set +CT_ZLIB_SRC_CUSTOM=y +CT_ZLIB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/zlib/zlib-1.2.13" +CT_ZLIB_PATCH_GLOBAL=y +# CT_ZLIB_PATCH_BUNDLED is not set +# CT_ZLIB_PATCH_LOCAL is not set +# CT_ZLIB_PATCH_BUNDLED_LOCAL is not set +# CT_ZLIB_PATCH_LOCAL_BUNDLED is not set +# CT_ZLIB_PATCH_NONE is not set +CT_ZLIB_PATCH_ORDER="global" +# CT_ZLIB_VERY_NEW is not set +CT_ZLIB_V_1_2_12=y +CT_ZLIB_VERSION="1.2.12" +CT_ZLIB_MIRRORS="http://downloads.sourceforge.net/project/libpng/zlib/${CT_ZLIB_VERSION} https://www.zlib.net/" +CT_ZLIB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_ZLIB_SIGNATURE_FORMAT="packed/.asc" +CT_ALL_COMP_LIBS_CHOICES="CLOOG EXPAT GETTEXT GMP GNUPRUMCU ISL LIBELF LIBICONV MPC MPFR NCURSES NEWLIB_NANO PICOLIBC ZLIB" +CT_LIBICONV_NEEDED=y +CT_GETTEXT_NEEDED=y +CT_GMP_NEEDED=y +CT_MPFR_NEEDED=y +CT_ISL_NEEDED=y +CT_MPC_NEEDED=y +CT_EXPAT_NEEDED=y +CT_NCURSES_NEEDED=y +CT_ZLIB_NEEDED=y +CT_LIBICONV=y +CT_GETTEXT=y +CT_GMP=y +CT_MPFR=y +CT_ISL=y +CT_MPC=y +CT_EXPAT=y +CT_NCURSES=y +CT_ZLIB=y +# end of Companion libraries + +# +# Companion tools +# +# CT_COMP_TOOLS_FOR_HOST is not set +# CT_COMP_TOOLS_AUTOCONF is not set +# CT_COMP_TOOLS_AUTOMAKE is not set +# CT_COMP_TOOLS_BISON is not set +# CT_COMP_TOOLS_DTC is not set +# CT_COMP_TOOLS_LIBTOOL is not set +# CT_COMP_TOOLS_M4 is not set +# CT_COMP_TOOLS_MAKE is not set +CT_ALL_COMP_TOOLS_CHOICES="AUTOCONF AUTOMAKE BISON DTC LIBTOOL M4 MAKE" +# end of Companion tools + +# +# Test suite +# +# CT_TEST_SUITE_GCC is not set +# end of Test suite diff --git a/cross_tools/configs/config_riscv64 b/cross_tools/configs/config_riscv64 new file mode 100644 index 0000000000000000000000000000000000000000..8ae3acbb3865b011e790d402adeed5a999d5f5b0 --- /dev/null +++ b/cross_tools/configs/config_riscv64 @@ -0,0 +1,999 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# crosstool-NG 1.25.0 Configuration +# +CT_CONFIGURE_has_static_link=y +CT_CONFIGURE_has_cxx11=y +CT_CONFIGURE_has_curl=y +CT_CONFIGURE_has_rsync=y +CT_CONFIGURE_has_make_3_81_or_newer=y +CT_CONFIGURE_has_make_4_0_or_newer=y +CT_CONFIGURE_has_libtool_2_4_or_newer=y +CT_CONFIGURE_has_libtoolize_2_4_or_newer=y +CT_CONFIGURE_has_autoconf_2_65_or_newer=y +CT_CONFIGURE_has_autoreconf_2_65_or_newer=y +CT_CONFIGURE_has_automake_1_15_or_newer=y +CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y +CT_CONFIGURE_has_python_3_4_or_newer=y +CT_CONFIGURE_has_bison_2_7_or_newer=y +CT_CONFIGURE_has_python=y +CT_CONFIGURE_has_git=y +CT_CONFIGURE_has_md5sum=y +CT_CONFIGURE_has_sha1sum=y +CT_CONFIGURE_has_sha256sum=y +CT_CONFIGURE_has_sha512sum=y +CT_CONFIGURE_has_install_with_strip_program=y +CT_VERSION="1.25.0" +CT_VCHECK="" +CT_CONFIG_VERSION_ENV="4" +CT_CONFIG_VERSION_CURRENT="4" +CT_CONFIG_VERSION="4" +CT_MODULES=y + +# +# Paths and misc options +# + +# +# crosstool-NG behavior +# +# CT_OBSOLETE is not set +CT_EXPERIMENTAL=y +# CT_ALLOW_BUILD_AS_ROOT is not set +# CT_DEBUG_CT is not set + +# +# Paths +# +CT_LOCAL_TARBALLS_DIR="${HOME}/src" +CT_SAVE_TARBALLS=y +# CT_TARBALLS_BUILDROOT_LAYOUT is not set +CT_WORK_DIR="${CT_TOP_DIR}/.build" +CT_BUILD_TOP_DIR="${CT_WORK_DIR:-${CT_TOP_DIR}/.build}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_BUILD_DIR="${CT_BUILD_TOP_DIR}/build" +CT_PREFIX_DIR="${CT_PREFIX:-${HOME}/x-tools}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_RM_RF_PREFIX_DIR=y +CT_REMOVE_DOCS=y +CT_INSTALL_LICENSES=y +CT_PREFIX_DIR_RO=y +CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y +# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set + +# +# Downloading +# +CT_DOWNLOAD_AGENT_CURL=y +# CT_DOWNLOAD_AGENT_NONE is not set +# CT_FORBID_DOWNLOAD is not set +# CT_FORCE_DOWNLOAD is not set +CT_CONNECT_TIMEOUT=10 +CT_DOWNLOAD_CURL_OPTIONS="--location --ftp-pasv --retry 3 --fail --silent" +# CT_ONLY_DOWNLOAD is not set +# CT_USE_MIRROR is not set +CT_VERIFY_DOWNLOAD_DIGEST=y +CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y +# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set +CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512" +# CT_VERIFY_DOWNLOAD_SIGNATURE is not set + +# +# Extracting +# +# CT_FORCE_EXTRACT is not set +CT_OVERRIDE_CONFIG_GUESS_SUB=y +# CT_ONLY_EXTRACT is not set +CT_PATCH_BUNDLED=y +# CT_PATCH_LOCAL is not set +# CT_PATCH_BUNDLED_LOCAL is not set +# CT_PATCH_LOCAL_BUNDLED is not set +# CT_PATCH_NONE is not set +CT_PATCH_ORDER="bundled" + +# +# Build behavior +# +CT_PARALLEL_JOBS=0 +CT_LOAD="" +CT_USE_PIPES=y +CT_EXTRA_CFLAGS_FOR_BUILD="" +CT_EXTRA_CXXFLAGS_FOR_BUILD="" +CT_EXTRA_LDFLAGS_FOR_BUILD="" +CT_EXTRA_CFLAGS_FOR_HOST="" +CT_EXTRA_LDFLAGS_FOR_HOST="" +# CT_CONFIG_SHELL_SH is not set +# CT_CONFIG_SHELL_ASH is not set +CT_CONFIG_SHELL_BASH=y +# CT_CONFIG_SHELL_CUSTOM is not set +CT_CONFIG_SHELL="${bash}" + +# +# Logging +# +# CT_LOG_ERROR is not set +# CT_LOG_WARN is not set +# CT_LOG_INFO is not set +CT_LOG_EXTRA=y +# CT_LOG_ALL is not set +# CT_LOG_DEBUG is not set +CT_LOG_LEVEL_MAX="EXTRA" +# CT_LOG_SEE_TOOLS_WARN is not set +CT_LOG_PROGRESS_BAR=y +CT_LOG_TO_FILE=y +CT_LOG_FILE_COMPRESS=y +# end of Paths and misc options + +# +# Target options +# +# CT_ARCH_ALPHA is not set +# CT_ARCH_ARC is not set +# CT_ARCH_ARM is not set +# CT_ARCH_AVR is not set +# CT_ARCH_C6X is not set +# CT_ARCH_M68K is not set +# CT_ARCH_MICROBLAZE is not set +# CT_ARCH_MIPS is not set +# CT_ARCH_MOXIE is not set +# CT_ARCH_MSP430 is not set +# CT_ARCH_NIOS2 is 
not set +# CT_ARCH_POWERPC is not set +# CT_ARCH_PRU is not set +CT_ARCH_RISCV=y +# CT_ARCH_S390 is not set +# CT_ARCH_SH is not set +# CT_ARCH_SPARC is not set +# CT_ARCH_X86 is not set +# CT_ARCH_XTENSA is not set +CT_ARCH="riscv" +CT_ARCH_CHOICE_KSYM="RISCV" +CT_ARCH_TUNE="" +CT_ARCH_RISCV_SHOW=y + +# +# Options for riscv +# +CT_ARCH_RISCV_PKG_KSYM="" +CT_ALL_ARCH_CHOICES="ALPHA ARC ARM AVR C6X M68K MICROBLAZE MIPS MOXIE MSP430 NIOS2 POWERPC PRU RISCV S390 SH SPARC X86 XTENSA" +CT_ARCH_SUFFIX="" +# CT_OMIT_TARGET_VENDOR is not set + +# +# Generic target options +# +# CT_MULTILIB is not set +# CT_DEMULTILIB is not set +CT_ARCH_SUPPORTS_BOTH_MMU=y +CT_ARCH_USE_MMU=y +CT_ARCH_SUPPORTS_32=y +CT_ARCH_SUPPORTS_64=y +CT_ARCH_DEFAULT_32=y +CT_ARCH_BITNESS=64 +# CT_ARCH_32 is not set +CT_ARCH_64=y + +# +# Target optimisations +# +CT_ARCH_SUPPORTS_WITH_ARCH=y +CT_ARCH_SUPPORTS_WITH_ABI=y +CT_ARCH_SUPPORTS_WITH_TUNE=y +CT_ARCH_ARCH="rv64gc" +CT_ARCH_ABI="" +CT_TARGET_CFLAGS="" +CT_TARGET_LDFLAGS="" +# end of Target options + +# +# Toolchain options +# + +# +# General toolchain options +# +CT_FORCE_SYSROOT=y +CT_USE_SYSROOT=y +CT_SYSROOT_NAME="sysroot" +CT_SYSROOT_DIR_PREFIX="" +CT_WANTS_STATIC_LINK=y +CT_WANTS_STATIC_LINK_CXX=y +# CT_STATIC_TOOLCHAIN is not set +CT_SHOW_CT_VERSION=y +CT_TOOLCHAIN_PKGVERSION="" +CT_TOOLCHAIN_BUGURL="" + +# +# Tuple completion and aliasing +# +CT_TARGET_VENDOR="openeuler" +CT_TARGET_ALIAS_SED_EXPR="" +CT_TARGET_ALIAS="" + +# +# Toolchain type +# +# CT_NATIVE is not set +CT_CROSS=y +# CT_CROSS_NATIVE is not set +# CT_CANADIAN is not set +CT_TOOLCHAIN_TYPE="cross" + +# +# Build system +# +CT_BUILD="" +CT_BUILD_PREFIX="" +CT_BUILD_SUFFIX="" + +# +# Misc options +# +# CT_TOOLCHAIN_ENABLE_NLS is not set +# end of Toolchain options + +# +# Operating System +# +CT_KERNEL_SUPPORTS_SHARED_LIBS=y +# CT_KERNEL_BARE_METAL is not set +CT_KERNEL_LINUX=y +CT_KERNEL="linux" +CT_KERNEL_CHOICE_KSYM="LINUX" +CT_KERNEL_LINUX_SHOW=y + +# +# Options for linux +# +CT_KERNEL_LINUX_PKG_KSYM="LINUX" +CT_LINUX_DIR_NAME="linux" +CT_LINUX_USE_WWW_KERNEL_ORG=y +# CT_LINUX_USE_ORACLE is not set +CT_LINUX_USE="LINUX" +CT_LINUX_PKG_NAME="linux" +# CT_LINUX_SRC_RELEASE is not set +# CT_LINUX_SRC_DEVEL is not set +CT_LINUX_SRC_CUSTOM=y +CT_LINUX_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/kernel" +CT_LINUX_PATCH_GLOBAL=y +# CT_LINUX_PATCH_BUNDLED is not set +# CT_LINUX_PATCH_LOCAL is not set +# CT_LINUX_PATCH_BUNDLED_LOCAL is not set +# CT_LINUX_PATCH_LOCAL_BUNDLED is not set +# CT_LINUX_PATCH_NONE is not set +CT_LINUX_PATCH_ORDER="global" +# CT_LINUX_VERY_NEW is not set +# CT_LINUX_V_5_16 is not set +# CT_LINUX_V_5_15 is not set +# CT_LINUX_V_5_14 is not set +# CT_LINUX_V_5_13 is not set +# CT_LINUX_V_5_12 is not set +# CT_LINUX_V_5_11 is not set +CT_LINUX_V_5_10=y +# CT_LINUX_V_5_9 is not set +# CT_LINUX_V_5_8 is not set +# CT_LINUX_V_5_7 is not set +# CT_LINUX_V_5_4 is not set +# CT_LINUX_V_5_3 is not set +# CT_LINUX_V_5_2 is not set +# CT_LINUX_V_5_1 is not set +# CT_LINUX_V_5_0 is not set +# CT_LINUX_V_4_20 is not set +# CT_LINUX_V_4_19 is not set +# CT_LINUX_V_4_18 is not set +# CT_LINUX_V_4_17 is not set +# CT_LINUX_V_4_16 is not set +# CT_LINUX_V_4_15 is not set +# CT_LINUX_V_4_14 is not set +# CT_LINUX_V_4_13 is not set +# CT_LINUX_V_4_12 is not set +# CT_LINUX_V_4_11 is not set +# CT_LINUX_V_4_10 is not set +# CT_LINUX_V_4_9 is not set +# CT_LINUX_V_4_4 is not set +# CT_LINUX_V_4_1 is not set +# CT_LINUX_V_3_16 is not set +# CT_LINUX_V_3_13 is not set +# 
CT_LINUX_V_3_12 is not set +# CT_LINUX_V_3_10 is not set +# CT_LINUX_V_3_4 is not set +# CT_LINUX_V_3_2 is not set +CT_LINUX_VERSION="5.10.100" +CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})" +CT_LINUX_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign" +CT_LINUX_5_12_or_older=y +CT_LINUX_older_than_5_12=y +CT_LINUX_later_than_5_3=y +CT_LINUX_5_3_or_later=y +CT_LINUX_later_than_4_8=y +CT_LINUX_4_8_or_later=y +CT_LINUX_later_than_3_7=y +CT_LINUX_3_7_or_later=y +CT_LINUX_later_than_3_2=y +CT_LINUX_3_2_or_later=y +CT_LINUX_REQUIRE_3_2_or_later=y +CT_KERNEL_DEP_RSYNC=y +CT_KERNEL_LINUX_VERBOSITY_0=y +# CT_KERNEL_LINUX_VERBOSITY_1 is not set +# CT_KERNEL_LINUX_VERBOSITY_2 is not set +CT_KERNEL_LINUX_VERBOSE_LEVEL=0 +CT_KERNEL_LINUX_INSTALL_CHECK=y +CT_ALL_KERNEL_CHOICES="BARE_METAL LINUX WINDOWS" + +# +# Common kernel options +# +CT_SHARED_LIBS=y +# end of Operating System + +# +# Binary utilities +# +CT_ARCH_BINFMT_ELF=y +CT_BINUTILS_BINUTILS=y +CT_BINUTILS="binutils" +CT_BINUTILS_CHOICE_KSYM="BINUTILS" +CT_BINUTILS_BINUTILS_SHOW=y + +# +# Options for binutils +# +CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS" +CT_BINUTILS_DIR_NAME="binutils" +CT_BINUTILS_USE_GNU=y +# CT_BINUTILS_USE_LINARO is not set +# CT_BINUTILS_USE_ORACLE is not set +CT_BINUTILS_USE="BINUTILS" +CT_BINUTILS_PKG_NAME="binutils" +# CT_BINUTILS_SRC_RELEASE is not set +# CT_BINUTILS_SRC_DEVEL is not set +CT_BINUTILS_SRC_CUSTOM=y +CT_BINUTILS_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/binutils/binutils-2.37" +CT_BINUTILS_PATCH_GLOBAL=y +# CT_BINUTILS_PATCH_BUNDLED is not set +# CT_BINUTILS_PATCH_LOCAL is not set +# CT_BINUTILS_PATCH_BUNDLED_LOCAL is not set +# CT_BINUTILS_PATCH_LOCAL_BUNDLED is not set +# CT_BINUTILS_PATCH_NONE is not set +CT_BINUTILS_PATCH_ORDER="global" +# CT_BINUTILS_VERY_NEW is not set +# CT_BINUTILS_V_2_38 is not set +CT_BINUTILS_V_2_37=y +# CT_BINUTILS_V_2_36 is not set +# CT_BINUTILS_V_2_35 is not set +# CT_BINUTILS_V_2_34 is not set +# CT_BINUTILS_V_2_33 is not set +# CT_BINUTILS_V_2_32 is not set +# CT_BINUTILS_V_2_31 is not set +# CT_BINUTILS_V_2_30 is not set +# CT_BINUTILS_V_2_29 is not set +# CT_BINUTILS_V_2_28 is not set +# CT_BINUTILS_V_2_27 is not set +# CT_BINUTILS_V_2_26 is not set +CT_BINUTILS_VERSION="2.37" +CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)" +CT_BINUTILS_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig" +CT_BINUTILS_later_than_2_30=y +CT_BINUTILS_2_30_or_later=y +CT_BINUTILS_later_than_2_27=y +CT_BINUTILS_2_27_or_later=y +CT_BINUTILS_later_than_2_26=y +CT_BINUTILS_2_26_or_later=y + +# +# GNU binutils +# +CT_BINUTILS_FORCE_LD_BFD_DEFAULT=y +CT_BINUTILS_LINKER_LD=y +CT_BINUTILS_LINKERS_LIST="ld" +CT_BINUTILS_LINKER_DEFAULT="bfd" +# CT_BINUTILS_PLUGINS is not set +CT_BINUTILS_RELRO=m +CT_BINUTILS_DETERMINISTIC_ARCHIVES=y +CT_BINUTILS_EXTRA_CONFIG_ARRAY="" +# CT_BINUTILS_FOR_TARGET is not set +CT_ALL_BINUTILS_CHOICES="BINUTILS" +# end of Binary utilities + +# +# C-library +# +CT_LIBC_GLIBC=y +# CT_LIBC_MUSL is not set +# CT_LIBC_UCLIBC_NG is not set +CT_LIBC="glibc" +CT_LIBC_CHOICE_KSYM="GLIBC" +CT_THREADS="nptl" +CT_LIBC_GLIBC_SHOW=y + +# +# Options for glibc +# +CT_LIBC_GLIBC_PKG_KSYM="GLIBC" +CT_GLIBC_DIR_NAME="glibc" 
+CT_GLIBC_USE_GNU=y +# CT_GLIBC_USE_ORACLE is not set +CT_GLIBC_USE="GLIBC" +CT_GLIBC_PKG_NAME="glibc" +# CT_GLIBC_SRC_RELEASE is not set +# CT_GLIBC_SRC_DEVEL is not set +CT_GLIBC_SRC_CUSTOM=y +CT_GLIBC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/glibc/glibc-2.36" +CT_GLIBC_PATCH_GLOBAL=y +# CT_GLIBC_PATCH_BUNDLED is not set +# CT_GLIBC_PATCH_LOCAL is not set +# CT_GLIBC_PATCH_BUNDLED_LOCAL is not set +# CT_GLIBC_PATCH_LOCAL_BUNDLED is not set +# CT_GLIBC_PATCH_NONE is not set +CT_GLIBC_PATCH_ORDER="global" +# CT_GLIBC_VERY_NEW is not set +CT_GLIBC_V_2_35=y +# CT_GLIBC_V_2_34 is not set +# CT_GLIBC_V_2_33 is not set +# CT_GLIBC_V_2_32 is not set +# CT_GLIBC_V_2_31 is not set +# CT_GLIBC_V_2_30 is not set +# CT_GLIBC_V_2_29 is not set +CT_GLIBC_VERSION="2.35" +CT_GLIBC_MIRRORS="$(CT_Mirrors GNU glibc)" +CT_GLIBC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GLIBC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GLIBC_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_GLIBC_SIGNATURE_FORMAT="packed/.sig" +CT_GLIBC_later_than_2_34=y +CT_GLIBC_2_34_or_later=y +CT_GLIBC_later_than_2_32=y +CT_GLIBC_2_32_or_later=y +CT_GLIBC_later_than_2_31=y +CT_GLIBC_2_31_or_later=y +CT_GLIBC_later_than_2_30=y +CT_GLIBC_2_30_or_later=y +CT_GLIBC_later_than_2_29=y +CT_GLIBC_2_29_or_later=y +CT_GLIBC_REQUIRE_2_29_or_later=y +CT_GLIBC_later_than_2_28=y +CT_GLIBC_2_28_or_later=y +CT_GLIBC_later_than_2_27=y +CT_GLIBC_2_27_or_later=y +CT_GLIBC_later_than_2_26=y +CT_GLIBC_2_26_or_later=y +CT_GLIBC_later_than_2_25=y +CT_GLIBC_2_25_or_later=y +CT_GLIBC_later_than_2_24=y +CT_GLIBC_2_24_or_later=y +CT_GLIBC_later_than_2_23=y +CT_GLIBC_2_23_or_later=y +CT_GLIBC_later_than_2_20=y +CT_GLIBC_2_20_or_later=y +CT_GLIBC_later_than_2_17=y +CT_GLIBC_2_17_or_later=y +CT_GLIBC_later_than_2_14=y +CT_GLIBC_2_14_or_later=y +CT_GLIBC_DEP_KERNEL_HEADERS_VERSION=y +CT_GLIBC_DEP_BINUTILS=y +CT_GLIBC_DEP_GCC=y +CT_GLIBC_DEP_PYTHON=y +CT_GLIBC_BUILD_SSP=y +CT_GLIBC_HAS_LIBIDN_ADDON=y +# CT_GLIBC_USE_LIBIDN_ADDON is not set +CT_GLIBC_NO_SPARC_V8=y +CT_GLIBC_EXTRA_CONFIG_ARRAY="--enable-crypt" +CT_GLIBC_CONFIGPARMS="rtlddir=/lib64/lp64d" +CT_GLIBC_EXTRA_CFLAGS="" +# CT_GLIBC_ENABLE_FORTIFIED_BUILD is not set +# CT_GLIBC_DISABLE_VERSIONING is not set +CT_GLIBC_OLDEST_ABI="" +CT_GLIBC_FORCE_UNWIND=y +# CT_GLIBC_LOCALES is not set +# CT_GLIBC_KERNEL_VERSION_NONE is not set +CT_GLIBC_KERNEL_VERSION_AS_HEADERS=y +# CT_GLIBC_KERNEL_VERSION_CHOSEN is not set +CT_GLIBC_MIN_KERNEL="5.10.100" +CT_GLIBC_SSP_DEFAULT=y +# CT_GLIBC_SSP_NO is not set +# CT_GLIBC_SSP_YES is not set +# CT_GLIBC_SSP_ALL is not set +# CT_GLIBC_SSP_STRONG is not set +CT_GLIBC_ENABLE_WERROR=y +# CT_GLIBC_ENABLE_COMMON_FLAG is not set +CT_ALL_LIBC_CHOICES="AVR_LIBC BIONIC GLIBC MINGW_W64 MOXIEBOX MUSL NEWLIB NONE UCLIBC_NG" +CT_LIBC_SUPPORT_THREADS_ANY=y +CT_LIBC_SUPPORT_THREADS_NATIVE=y + +# +# Common C library options +# +CT_THREADS_NATIVE=y +# CT_CREATE_LDSO_CONF is not set +CT_LIBC_XLDD=y +# end of C-library + +# +# C compiler +# +CT_CC_CORE_NEEDED=y +CT_CC_SUPPORT_CXX=y +CT_CC_SUPPORT_FORTRAN=y +CT_CC_SUPPORT_ADA=y +CT_CC_SUPPORT_OBJC=y +CT_CC_SUPPORT_OBJCXX=y +CT_CC_SUPPORT_GOLANG=y +CT_CC_GCC=y +CT_CC="gcc" +CT_CC_CHOICE_KSYM="GCC" +CT_CC_GCC_SHOW=y + +# +# Options for gcc +# +CT_CC_GCC_PKG_KSYM="GCC" +CT_GCC_DIR_NAME="gcc" +CT_GCC_USE_GNU=y +# CT_GCC_USE_LINARO is not set +CT_GCC_USE="GCC" +CT_GCC_PKG_NAME="gcc" +# CT_GCC_SRC_RELEASE is not set +# CT_GCC_SRC_DEVEL is not set +CT_GCC_SRC_CUSTOM=y 
+CT_GCC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gcc/gcc-10.3.0" +CT_GCC_PATCH_GLOBAL=y +# CT_GCC_PATCH_BUNDLED is not set +# CT_GCC_PATCH_LOCAL is not set +# CT_GCC_PATCH_BUNDLED_LOCAL is not set +# CT_GCC_PATCH_LOCAL_BUNDLED is not set +# CT_GCC_PATCH_NONE is not set +CT_GCC_PATCH_ORDER="global" +# CT_GCC_VERY_NEW is not set +# CT_GCC_V_11 is not set +CT_GCC_V_10=y +# CT_GCC_V_9 is not set +# CT_GCC_V_8 is not set +# CT_GCC_V_7 is not set +CT_GCC_VERSION="10.3.0" +CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware gcc/releases/gcc-${CT_GCC_VERSION})" +CT_GCC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GCC_SIGNATURE_FORMAT="" +CT_GCC_11_or_older=y +CT_GCC_older_than_11=y +CT_GCC_later_than_10=y +CT_GCC_10_or_later=y +CT_GCC_later_than_9=y +CT_GCC_9_or_later=y +CT_GCC_later_than_8=y +CT_GCC_8_or_later=y +CT_GCC_later_than_7=y +CT_GCC_7_or_later=y +CT_GCC_REQUIRE_7_or_later=y +CT_GCC_later_than_6=y +CT_GCC_6_or_later=y +CT_GCC_REQUIRE_6_or_later=y +CT_GCC_later_than_5=y +CT_GCC_5_or_later=y +CT_GCC_REQUIRE_5_or_later=y +CT_GCC_later_than_4_9=y +CT_GCC_4_9_or_later=y +CT_GCC_REQUIRE_4_9_or_later=y +CT_CC_GCC_HAS_LIBMPX=y +CT_CC_GCC_ENABLE_CXX_FLAGS="" +CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" +CT_CC_GCC_EXTRA_CONFIG_ARRAY=" --disable-multilib --with-abi=lp64d --with-gnu-as --with-gnu-ld --enable-c99 --enable-shared --enable-poison-system-directories --enable-symvers=gnu --disable-bootstrap --disable-libstdcxx-dual-abi --enable-default-pie --with-toolexeclibdir=\"${CT_PREFIX_DIR}/${CT_TARGET}/sysroot/lib64/lp64d/\"" +CT_CC_GCC_STATIC_LIBSTDCXX=y +# CT_CC_GCC_SYSTEM_ZLIB is not set +CT_CC_GCC_CONFIG_TLS=m + +# +# Optimisation features +# +CT_CC_GCC_USE_GRAPHITE=y +CT_CC_GCC_USE_LTO=y +CT_CC_GCC_LTO_ZSTD=m + +# +# Settings for libraries running on target +# +CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y +# CT_CC_GCC_LIBMUDFLAP is not set +# CT_CC_GCC_LIBGOMP is not set +# CT_CC_GCC_LIBSSP is not set +# CT_CC_GCC_LIBQUADMATH is not set +# CT_CC_GCC_LIBSANITIZER is not set + +# +# Misc. obscure options. 
+# +CT_CC_CXA_ATEXIT=y +CT_CC_GCC_TM_CLONE_REGISTRY=m +# CT_CC_GCC_DISABLE_PCH is not set +CT_CC_GCC_SJLJ_EXCEPTIONS=m +CT_CC_GCC_LDBL_128=m +# CT_CC_GCC_BUILD_ID is not set +CT_CC_GCC_LNK_HASH_STYLE_DEFAULT=y +# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set +# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set +# CT_CC_GCC_LNK_HASH_STYLE_BOTH is not set +CT_CC_GCC_LNK_HASH_STYLE="" +CT_CC_GCC_DEC_FLOATS_AUTO=y +# CT_CC_GCC_DEC_FLOATS_BID is not set +# CT_CC_GCC_DEC_FLOATS_DPD is not set +# CT_CC_GCC_DEC_FLOATS_NO is not set +CT_CC_GCC_DEC_FLOATS="" +CT_ALL_CC_CHOICES="GCC" + +# +# Additional supported languages: +# +CT_CC_LANG_CXX=y +# CT_CC_LANG_FORTRAN is not set +# CT_CC_LANG_ADA is not set +# CT_CC_LANG_OBJC is not set +# CT_CC_LANG_OBJCXX is not set +# CT_CC_LANG_GOLANG is not set +CT_CC_LANG_OTHERS="" +# end of C compiler + +# +# Debug facilities +# +# CT_DEBUG_DUMA is not set +CT_DEBUG_GDB=y +CT_DEBUG_GDB_PKG_KSYM="GDB" +CT_GDB_DIR_NAME="gdb" +CT_GDB_PKG_NAME="gdb" +# CT_GDB_SRC_RELEASE is not set +# CT_GDB_SRC_DEVEL is not set +CT_GDB_SRC_CUSTOM=y +CT_GDB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gdb/gdb-12.1" +CT_GDB_PATCH_GLOBAL=y +# CT_GDB_PATCH_BUNDLED is not set +# CT_GDB_PATCH_LOCAL is not set +# CT_GDB_PATCH_BUNDLED_LOCAL is not set +# CT_GDB_PATCH_LOCAL_BUNDLED is not set +# CT_GDB_PATCH_NONE is not set +CT_GDB_PATCH_ORDER="global" +# CT_GDB_VERY_NEW is not set +CT_GDB_V_11=y +# CT_GDB_V_10 is not set +# CT_GDB_V_9 is not set +# CT_GDB_V_8_3 is not set +CT_GDB_VERSION="11.2" +CT_GDB_MIRRORS="$(CT_Mirrors GNU gdb) $(CT_Mirrors sourceware gdb/releases)" +CT_GDB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GDB_SIGNATURE_FORMAT="" +CT_GDB_later_than_11=y +CT_GDB_11_or_later=y +CT_GDB_later_than_10=y +CT_GDB_10_or_later=y +CT_GDB_later_than_8_3=y +CT_GDB_8_3_or_later=y +CT_GDB_later_than_8_0=y +CT_GDB_8_0_or_later=y +CT_GDB_REQUIRE_8_0_or_later=y +CT_GDB_later_than_7_12=y +CT_GDB_7_12_or_later=y +CT_GDB_later_than_7_11=y +CT_GDB_7_11_or_later=y +CT_GDB_CROSS=y +# CT_GDB_CROSS_STATIC is not set +# CT_GDB_CROSS_SIM is not set +# CT_GDB_CROSS_PYTHON is not set +CT_GDB_CROSS_EXTRA_CONFIG_ARRAY="" +# CT_GDB_NATIVE is not set +# CT_GDB_GDBSERVER is not set +CT_GDB_GDBSERVER_TOPLEVEL=y +# CT_DEBUG_LTRACE is not set +# CT_DEBUG_STRACE is not set +CT_ALL_DEBUG_CHOICES="DUMA GDB LTRACE STRACE" +# end of Debug facilities + +# +# Companion libraries +# +# CT_COMPLIBS_CHECK is not set +# CT_COMP_LIBS_CLOOG is not set +CT_COMP_LIBS_EXPAT=y +CT_COMP_LIBS_EXPAT_PKG_KSYM="EXPAT" +CT_EXPAT_DIR_NAME="expat" +CT_EXPAT_PKG_NAME="expat" +# CT_EXPAT_SRC_RELEASE is not set +# CT_EXPAT_SRC_DEVEL is not set +CT_EXPAT_SRC_CUSTOM=y +CT_EXPAT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/expat/expat-2.5.0" +CT_EXPAT_PATCH_GLOBAL=y +# CT_EXPAT_PATCH_BUNDLED is not set +# CT_EXPAT_PATCH_LOCAL is not set +# CT_EXPAT_PATCH_BUNDLED_LOCAL is not set +# CT_EXPAT_PATCH_LOCAL_BUNDLED is not set +# CT_EXPAT_PATCH_NONE is not set +CT_EXPAT_PATCH_ORDER="global" +CT_EXPAT_VERY_NEW=y +# CT_EXPAT_V_2_4 is not set +CT_EXPAT_VERSION="new" +CT_EXPAT_MIRRORS="http://downloads.sourceforge.net/project/expat/expat/${CT_EXPAT_VERSION} https://github.com/libexpat/libexpat/releases/download/R_${CT_EXPAT_VERSION//./_}" +CT_EXPAT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_EXPAT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_EXPAT_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2 .tar.gz" +CT_EXPAT_SIGNATURE_FORMAT="" 
+CT_COMP_LIBS_GETTEXT=y +CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT" +CT_GETTEXT_DIR_NAME="gettext" +CT_GETTEXT_PKG_NAME="gettext" +# CT_GETTEXT_SRC_RELEASE is not set +# CT_GETTEXT_SRC_DEVEL is not set +CT_GETTEXT_SRC_CUSTOM=y +CT_GETTEXT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gettext/gettext-0.21" +CT_GETTEXT_PATCH_GLOBAL=y +# CT_GETTEXT_PATCH_BUNDLED is not set +# CT_GETTEXT_PATCH_LOCAL is not set +# CT_GETTEXT_PATCH_BUNDLED_LOCAL is not set +# CT_GETTEXT_PATCH_LOCAL_BUNDLED is not set +# CT_GETTEXT_PATCH_NONE is not set +CT_GETTEXT_PATCH_ORDER="global" +# CT_GETTEXT_VERY_NEW is not set +CT_GETTEXT_V_0_21=y +# CT_GETTEXT_V_0_20_1 is not set +# CT_GETTEXT_V_0_19_8_1 is not set +CT_GETTEXT_VERSION="0.21" +CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)" +CT_GETTEXT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig" +CT_GETTEXT_0_21_or_later=y +CT_GETTEXT_0_21_or_older=y +CT_GETTEXT_INCOMPATIBLE_WITH_UCLIBC_NG=y + +# +# This version of gettext is not compatible with uClibc-NG. Select +# + +# +# a different version if uClibc-NG is used on the target or (in a +# + +# +# Canadian cross build) on the host. +# +CT_COMP_LIBS_GMP=y +CT_COMP_LIBS_GMP_PKG_KSYM="GMP" +CT_GMP_DIR_NAME="gmp" +CT_GMP_PKG_NAME="gmp" +# CT_GMP_SRC_RELEASE is not set +# CT_GMP_SRC_DEVEL is not set +CT_GMP_SRC_CUSTOM=y +CT_GMP_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gmp/gmp-6.2.1" +CT_GMP_PATCH_GLOBAL=y +# CT_GMP_PATCH_BUNDLED is not set +# CT_GMP_PATCH_LOCAL is not set +# CT_GMP_PATCH_BUNDLED_LOCAL is not set +# CT_GMP_PATCH_LOCAL_BUNDLED is not set +# CT_GMP_PATCH_NONE is not set +CT_GMP_PATCH_ORDER="global" +# CT_GMP_VERY_NEW is not set +CT_GMP_V_6_2=y +# CT_GMP_V_6_1 is not set +CT_GMP_VERSION="6.2.1" +CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)" +CT_GMP_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2" +CT_GMP_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_ISL=y +CT_COMP_LIBS_ISL_PKG_KSYM="ISL" +CT_ISL_DIR_NAME="isl" +CT_ISL_PKG_NAME="isl" +# CT_ISL_SRC_RELEASE is not set +# CT_ISL_SRC_DEVEL is not set +CT_ISL_SRC_CUSTOM=y +CT_ISL_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/isl/isl-0.24" +CT_ISL_PATCH_GLOBAL=y +# CT_ISL_PATCH_BUNDLED is not set +# CT_ISL_PATCH_LOCAL is not set +# CT_ISL_PATCH_BUNDLED_LOCAL is not set +# CT_ISL_PATCH_LOCAL_BUNDLED is not set +# CT_ISL_PATCH_NONE is not set +CT_ISL_PATCH_ORDER="global" +# CT_ISL_VERY_NEW is not set +# CT_ISL_V_0_24 is not set +# CT_ISL_V_0_23 is not set +# CT_ISL_V_0_22 is not set +# CT_ISL_V_0_21 is not set +# CT_ISL_V_0_20 is not set +# CT_ISL_V_0_19 is not set +# CT_ISL_V_0_18 is not set +# CT_ISL_V_0_17 is not set +CT_ISL_V_0_16=y +# CT_ISL_V_0_15 is not set +CT_ISL_VERSION="0.16.1" +CT_ISL_MIRRORS="https://libisl.sourceforge.io" +CT_ISL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_ISL_SIGNATURE_FORMAT="" +CT_ISL_0_18_or_older=y +CT_ISL_older_than_0_18=y +CT_ISL_later_than_0_15=y +CT_ISL_0_15_or_later=y +# CT_COMP_LIBS_LIBELF is not set +CT_COMP_LIBS_LIBICONV=y +CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV" +CT_LIBICONV_DIR_NAME="libiconv" +CT_LIBICONV_PKG_NAME="libiconv" +# CT_LIBICONV_SRC_RELEASE is not set +# 
CT_LIBICONV_SRC_DEVEL is not set +CT_LIBICONV_SRC_CUSTOM=y +CT_LIBICONV_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libiconv/libiconv-1.16" +CT_LIBICONV_PATCH_GLOBAL=y +# CT_LIBICONV_PATCH_BUNDLED is not set +# CT_LIBICONV_PATCH_LOCAL is not set +# CT_LIBICONV_PATCH_BUNDLED_LOCAL is not set +# CT_LIBICONV_PATCH_LOCAL_BUNDLED is not set +# CT_LIBICONV_PATCH_NONE is not set +CT_LIBICONV_PATCH_ORDER="global" +# CT_LIBICONV_VERY_NEW is not set +CT_LIBICONV_V_1_16=y +# CT_LIBICONV_V_1_15 is not set +CT_LIBICONV_VERSION="1.16" +CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)" +CT_LIBICONV_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz" +CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_MPC=y +CT_COMP_LIBS_MPC_PKG_KSYM="MPC" +CT_MPC_DIR_NAME="mpc" +CT_MPC_PKG_NAME="mpc" +# CT_MPC_SRC_RELEASE is not set +# CT_MPC_SRC_DEVEL is not set +CT_MPC_SRC_CUSTOM=y +CT_MPC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libmpc/mpc-1.3.1" +CT_MPC_PATCH_GLOBAL=y +# CT_MPC_PATCH_BUNDLED is not set +# CT_MPC_PATCH_LOCAL is not set +# CT_MPC_PATCH_BUNDLED_LOCAL is not set +# CT_MPC_PATCH_LOCAL_BUNDLED is not set +# CT_MPC_PATCH_NONE is not set +CT_MPC_PATCH_ORDER="global" +# CT_MPC_VERY_NEW is not set +CT_MPC_V_1_2=y +# CT_MPC_V_1_1 is not set +# CT_MPC_V_1_0 is not set +CT_MPC_VERSION="1.2.1" +CT_MPC_MIRRORS="http://www.multiprecision.org/downloads $(CT_Mirrors GNU mpc)" +CT_MPC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_FORMATS=".tar.gz" +CT_MPC_SIGNATURE_FORMAT="packed/.sig" +CT_MPC_later_than_1_1_0=y +CT_MPC_1_1_0_or_later=y +CT_COMP_LIBS_MPFR=y +CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR" +CT_MPFR_DIR_NAME="mpfr" +CT_MPFR_PKG_NAME="mpfr" +# CT_MPFR_SRC_RELEASE is not set +# CT_MPFR_SRC_DEVEL is not set +CT_MPFR_SRC_CUSTOM=y +CT_MPFR_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/mpfr/mpfr-4.1.0" +CT_MPFR_PATCH_GLOBAL=y +# CT_MPFR_PATCH_BUNDLED is not set +# CT_MPFR_PATCH_LOCAL is not set +# CT_MPFR_PATCH_BUNDLED_LOCAL is not set +# CT_MPFR_PATCH_LOCAL_BUNDLED is not set +# CT_MPFR_PATCH_NONE is not set +CT_MPFR_PATCH_ORDER="global" +# CT_MPFR_VERY_NEW is not set +CT_MPFR_V_4_1=y +# CT_MPFR_V_4_0 is not set +# CT_MPFR_V_3_1 is not set +CT_MPFR_VERSION="4.1.0" +CT_MPFR_MIRRORS="http://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)" +CT_MPFR_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip" +CT_MPFR_SIGNATURE_FORMAT="packed/.asc" +CT_MPFR_later_than_4_0_0=y +CT_MPFR_4_0_0_or_later=y +CT_COMP_LIBS_NCURSES=y +CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES" +CT_NCURSES_DIR_NAME="ncurses" +CT_NCURSES_PKG_NAME="ncurses" +# CT_NCURSES_SRC_RELEASE is not set +# CT_NCURSES_SRC_DEVEL is not set +CT_NCURSES_SRC_CUSTOM=y +CT_NCURSES_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/ncurses/ncurses-6.4" +CT_NCURSES_PATCH_GLOBAL=y +# CT_NCURSES_PATCH_BUNDLED is not set +# CT_NCURSES_PATCH_LOCAL is not set +# CT_NCURSES_PATCH_BUNDLED_LOCAL is not set +# CT_NCURSES_PATCH_LOCAL_BUNDLED is not set +# CT_NCURSES_PATCH_NONE is not set +CT_NCURSES_PATCH_ORDER="global" +CT_NCURSES_VERY_NEW=y +# CT_NCURSES_V_6_2 is not set +# CT_NCURSES_V_6_1 is not set +# CT_NCURSES_V_6_0 is not set +CT_NCURSES_VERSION="new" +CT_NCURSES_MIRRORS="https://invisible-mirror.net/archives/ncurses $(CT_Mirrors GNU ncurses)" 
+CT_NCURSES_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_FORMATS=".tar.gz" +CT_NCURSES_SIGNATURE_FORMAT="packed/.sig" +CT_NCURSES_NEW_ABI=y +CT_NCURSES_HOST_CONFIG_ARGS="" +CT_NCURSES_HOST_DISABLE_DB=y +CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100" +CT_NCURSES_TARGET_CONFIG_ARGS="" +# CT_NCURSES_TARGET_DISABLE_DB is not set +CT_NCURSES_TARGET_FALLBACKS="" +CT_COMP_LIBS_ZLIB=y +CT_COMP_LIBS_ZLIB_PKG_KSYM="ZLIB" +CT_ZLIB_DIR_NAME="zlib" +CT_ZLIB_PKG_NAME="zlib" +# CT_ZLIB_SRC_RELEASE is not set +# CT_ZLIB_SRC_DEVEL is not set +CT_ZLIB_SRC_CUSTOM=y +CT_ZLIB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/zlib/zlib-1.2.13" +CT_ZLIB_PATCH_GLOBAL=y +# CT_ZLIB_PATCH_BUNDLED is not set +# CT_ZLIB_PATCH_LOCAL is not set +# CT_ZLIB_PATCH_BUNDLED_LOCAL is not set +# CT_ZLIB_PATCH_LOCAL_BUNDLED is not set +# CT_ZLIB_PATCH_NONE is not set +CT_ZLIB_PATCH_ORDER="global" +# CT_ZLIB_VERY_NEW is not set +CT_ZLIB_V_1_2_12=y +CT_ZLIB_VERSION="1.2.12" +CT_ZLIB_MIRRORS="http://downloads.sourceforge.net/project/libpng/zlib/${CT_ZLIB_VERSION} https://www.zlib.net/" +CT_ZLIB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_ZLIB_SIGNATURE_FORMAT="packed/.asc" +CT_ALL_COMP_LIBS_CHOICES="CLOOG EXPAT GETTEXT GMP GNUPRUMCU ISL LIBELF LIBICONV MPC MPFR NCURSES NEWLIB_NANO PICOLIBC ZLIB" +CT_LIBICONV_NEEDED=y +CT_GETTEXT_NEEDED=y +CT_GMP_NEEDED=y +CT_MPFR_NEEDED=y +CT_ISL_NEEDED=y +CT_MPC_NEEDED=y +CT_EXPAT_NEEDED=y +CT_NCURSES_NEEDED=y +CT_ZLIB_NEEDED=y +CT_LIBICONV=y +CT_GETTEXT=y +CT_GMP=y +CT_MPFR=y +CT_ISL=y +CT_MPC=y +CT_EXPAT=y +CT_NCURSES=y +CT_ZLIB=y +# end of Companion libraries + +# +# Companion tools +# +# CT_COMP_TOOLS_FOR_HOST is not set +# CT_COMP_TOOLS_AUTOCONF is not set +# CT_COMP_TOOLS_AUTOMAKE is not set +# CT_COMP_TOOLS_BISON is not set +# CT_COMP_TOOLS_DTC is not set +# CT_COMP_TOOLS_LIBTOOL is not set +# CT_COMP_TOOLS_M4 is not set +# CT_COMP_TOOLS_MAKE is not set +CT_ALL_COMP_TOOLS_CHOICES="AUTOCONF AUTOMAKE BISON DTC LIBTOOL M4 MAKE" +# end of Companion tools + +# +# Test suite +# +# CT_TEST_SUITE_GCC is not set +# end of Test suite diff --git a/cross_tools/configs/config_x86_64 b/cross_tools/configs/config_x86_64 new file mode 100644 index 0000000000000000000000000000000000000000..d276648841225b6bfbda3b5bfd9083de80d23657 --- /dev/null +++ b/cross_tools/configs/config_x86_64 @@ -0,0 +1,1020 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# crosstool-NG 1.25.0 Configuration +# +CT_CONFIGURE_has_static_link=y +CT_CONFIGURE_has_cxx11=y +CT_CONFIGURE_has_curl=y +CT_CONFIGURE_has_rsync=y +CT_CONFIGURE_has_make_3_81_or_newer=y +CT_CONFIGURE_has_make_4_0_or_newer=y +CT_CONFIGURE_has_libtool_2_4_or_newer=y +CT_CONFIGURE_has_libtoolize_2_4_or_newer=y +CT_CONFIGURE_has_autoconf_2_65_or_newer=y +CT_CONFIGURE_has_autoreconf_2_65_or_newer=y +CT_CONFIGURE_has_automake_1_15_or_newer=y +CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y +CT_CONFIGURE_has_python_3_4_or_newer=y +CT_CONFIGURE_has_bison_2_7_or_newer=y +CT_CONFIGURE_has_python=y +CT_CONFIGURE_has_git=y +CT_CONFIGURE_has_md5sum=y +CT_CONFIGURE_has_sha1sum=y +CT_CONFIGURE_has_sha256sum=y +CT_CONFIGURE_has_sha512sum=y +CT_CONFIGURE_has_install_with_strip_program=y +CT_VERSION="1.25.0" +CT_VCHECK="" +CT_CONFIG_VERSION_ENV="4" +CT_CONFIG_VERSION_CURRENT="4" +CT_CONFIG_VERSION="4" +CT_MODULES=y + +# +# Paths and misc options +# + +# +# crosstool-NG behavior +# +# CT_OBSOLETE is not set +CT_EXPERIMENTAL=y +# CT_ALLOW_BUILD_AS_ROOT is not set +# CT_DEBUG_CT is not set + +# +# Paths +# +CT_LOCAL_TARBALLS_DIR="${HOME}/src" +CT_SAVE_TARBALLS=y +# CT_TARBALLS_BUILDROOT_LAYOUT is not set +CT_WORK_DIR="${CT_TOP_DIR}/.build" +CT_BUILD_TOP_DIR="${CT_WORK_DIR:-${CT_TOP_DIR}/.build}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_BUILD_DIR="${CT_BUILD_TOP_DIR}/build" +CT_PREFIX_DIR="${CT_PREFIX:-${HOME}/x-tools}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_RM_RF_PREFIX_DIR=y +CT_REMOVE_DOCS=y +CT_INSTALL_LICENSES=y +CT_PREFIX_DIR_RO=y +CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y +# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set + +# +# Downloading +# +CT_DOWNLOAD_AGENT_CURL=y +# CT_DOWNLOAD_AGENT_NONE is not set +# CT_FORBID_DOWNLOAD is not set +# CT_FORCE_DOWNLOAD is not set +CT_CONNECT_TIMEOUT=10 +CT_DOWNLOAD_CURL_OPTIONS="--location --ftp-pasv --retry 3 --fail --silent" +# CT_ONLY_DOWNLOAD is not set +# CT_USE_MIRROR is not set +CT_VERIFY_DOWNLOAD_DIGEST=y +CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y +# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set +CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512" +# CT_VERIFY_DOWNLOAD_SIGNATURE is not set + +# +# Extracting +# +# CT_FORCE_EXTRACT is not set +CT_OVERRIDE_CONFIG_GUESS_SUB=y +# CT_ONLY_EXTRACT is not set +CT_PATCH_BUNDLED=y +# CT_PATCH_LOCAL is not set +# CT_PATCH_BUNDLED_LOCAL is not set +# CT_PATCH_LOCAL_BUNDLED is not set +# CT_PATCH_NONE is not set +CT_PATCH_ORDER="bundled" + +# +# Build behavior +# +CT_PARALLEL_JOBS=0 +CT_LOAD="" +CT_USE_PIPES=y +CT_EXTRA_CFLAGS_FOR_BUILD="" +CT_EXTRA_CXXFLAGS_FOR_BUILD="" +CT_EXTRA_LDFLAGS_FOR_BUILD="" +CT_EXTRA_CFLAGS_FOR_HOST="" +CT_EXTRA_LDFLAGS_FOR_HOST="" +# CT_CONFIG_SHELL_SH is not set +# CT_CONFIG_SHELL_ASH is not set +CT_CONFIG_SHELL_BASH=y +# CT_CONFIG_SHELL_CUSTOM is not set +CT_CONFIG_SHELL="${bash}" + +# +# Logging +# +# CT_LOG_ERROR is not set +# CT_LOG_WARN is not set +# CT_LOG_INFO is not set +CT_LOG_EXTRA=y +# CT_LOG_ALL is not set +# CT_LOG_DEBUG is not set +CT_LOG_LEVEL_MAX="EXTRA" +# CT_LOG_SEE_TOOLS_WARN is not set +CT_LOG_PROGRESS_BAR=y +CT_LOG_TO_FILE=y +CT_LOG_FILE_COMPRESS=y +# end of Paths and misc options + +# +# Target options +# +# CT_ARCH_ALPHA is not set +# CT_ARCH_ARC is not set +# CT_ARCH_ARM is not set +# CT_ARCH_AVR is not set +# CT_ARCH_C6X is not set +# CT_ARCH_M68K is not set +# CT_ARCH_MICROBLAZE is not set +# CT_ARCH_MIPS is not set +# CT_ARCH_MOXIE is not set +# CT_ARCH_MSP430 is not set +# CT_ARCH_NIOS2 is 
not set +# CT_ARCH_POWERPC is not set +# CT_ARCH_PRU is not set +# CT_ARCH_RISCV is not set +# CT_ARCH_S390 is not set +# CT_ARCH_SH is not set +# CT_ARCH_SPARC is not set +CT_ARCH_X86=y +# CT_ARCH_XTENSA is not set +CT_ARCH="x86" +CT_ARCH_CHOICE_KSYM="X86" +CT_ARCH_CPU="" +CT_ARCH_TUNE="" +CT_ARCH_X86_SHOW=y + +# +# Options for x86 +# +CT_ARCH_X86_PKG_KSYM="" +CT_ALL_ARCH_CHOICES="ALPHA ARC ARM AVR C6X M68K MICROBLAZE MIPS MOXIE MSP430 NIOS2 POWERPC PRU RISCV S390 SH SPARC X86 XTENSA" +CT_ARCH_SUFFIX="" +# CT_OMIT_TARGET_VENDOR is not set + +# +# Generic target options +# +# CT_MULTILIB is not set +# CT_DEMULTILIB is not set +CT_ARCH_USE_MMU=y +CT_ARCH_SUPPORTS_32=y +CT_ARCH_SUPPORTS_64=y +CT_ARCH_DEFAULT_32=y +CT_ARCH_BITNESS=64 +# CT_ARCH_32 is not set +CT_ARCH_64=y +CT_ARCH_SUPPORTS_WITH_32_64=y + +# +# Target optimisations +# +CT_ARCH_SUPPORTS_WITH_ARCH=y +CT_ARCH_SUPPORTS_WITH_CPU=y +CT_ARCH_SUPPORTS_WITH_TUNE=y +CT_ARCH_ARCH="" +CT_TARGET_CFLAGS="" +CT_TARGET_LDFLAGS="" +# end of Target options + +# +# Toolchain options +# + +# +# General toolchain options +# +CT_FORCE_SYSROOT=y +CT_USE_SYSROOT=y +CT_SYSROOT_NAME="sysroot" +CT_SYSROOT_DIR_PREFIX="" +CT_WANTS_STATIC_LINK=y +CT_WANTS_STATIC_LINK_CXX=y +# CT_STATIC_TOOLCHAIN is not set +CT_SHOW_CT_VERSION=y +CT_TOOLCHAIN_PKGVERSION="" +CT_TOOLCHAIN_BUGURL="" + +# +# Tuple completion and aliasing +# +CT_TARGET_VENDOR="openeuler" +CT_TARGET_ALIAS_SED_EXPR="" +CT_TARGET_ALIAS="" + +# +# Toolchain type +# +# CT_NATIVE is not set +CT_CROSS=y +# CT_CROSS_NATIVE is not set +# CT_CANADIAN is not set +CT_TOOLCHAIN_TYPE="cross" + +# +# Build system +# +CT_BUILD="" +CT_BUILD_PREFIX="" +CT_BUILD_SUFFIX="" + +# +# Misc options +# +# CT_TOOLCHAIN_ENABLE_NLS is not set +# end of Toolchain options + +# +# Operating System +# +CT_KERNEL_SUPPORTS_SHARED_LIBS=y +# CT_KERNEL_BARE_METAL is not set +CT_KERNEL_LINUX=y +# CT_KERNEL_WINDOWS is not set +CT_KERNEL="linux" +CT_KERNEL_CHOICE_KSYM="LINUX" +CT_KERNEL_LINUX_SHOW=y + +# +# Options for linux +# +CT_KERNEL_LINUX_PKG_KSYM="LINUX" +CT_LINUX_DIR_NAME="linux" +CT_LINUX_USE_WWW_KERNEL_ORG=y +# CT_LINUX_USE_ORACLE is not set +CT_LINUX_USE="LINUX" +CT_LINUX_PKG_NAME="linux" +# CT_LINUX_SRC_RELEASE is not set +# CT_LINUX_SRC_DEVEL is not set +CT_LINUX_SRC_CUSTOM=y +CT_LINUX_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/kernel" +CT_LINUX_PATCH_GLOBAL=y +# CT_LINUX_PATCH_BUNDLED is not set +# CT_LINUX_PATCH_LOCAL is not set +# CT_LINUX_PATCH_BUNDLED_LOCAL is not set +# CT_LINUX_PATCH_LOCAL_BUNDLED is not set +# CT_LINUX_PATCH_NONE is not set +CT_LINUX_PATCH_ORDER="global" +# CT_LINUX_VERY_NEW is not set +CT_LINUX_V_5_16=y +# CT_LINUX_V_5_15 is not set +# CT_LINUX_V_5_14 is not set +# CT_LINUX_V_5_13 is not set +# CT_LINUX_V_5_12 is not set +# CT_LINUX_V_5_11 is not set +# CT_LINUX_V_5_10 is not set +# CT_LINUX_V_5_9 is not set +# CT_LINUX_V_5_8 is not set +# CT_LINUX_V_5_7 is not set +# CT_LINUX_V_5_4 is not set +# CT_LINUX_V_5_3 is not set +# CT_LINUX_V_5_2 is not set +# CT_LINUX_V_5_1 is not set +# CT_LINUX_V_5_0 is not set +# CT_LINUX_V_4_20 is not set +# CT_LINUX_V_4_19 is not set +# CT_LINUX_V_4_18 is not set +# CT_LINUX_V_4_17 is not set +# CT_LINUX_V_4_16 is not set +# CT_LINUX_V_4_15 is not set +# CT_LINUX_V_4_14 is not set +# CT_LINUX_V_4_13 is not set +# CT_LINUX_V_4_12 is not set +# CT_LINUX_V_4_11 is not set +# CT_LINUX_V_4_10 is not set +# CT_LINUX_V_4_9 is not set +# CT_LINUX_V_4_4 is not set +# CT_LINUX_V_4_1 is not set +# CT_LINUX_V_3_16 is not set +# CT_LINUX_V_3_13 is not 
set +# CT_LINUX_V_3_12 is not set +# CT_LINUX_V_3_10 is not set +# CT_LINUX_V_3_4 is not set +# CT_LINUX_V_3_2 is not set +CT_LINUX_VERSION="5.16.9" +CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})" +CT_LINUX_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign" +CT_LINUX_later_than_5_12=y +CT_LINUX_5_12_or_later=y +CT_LINUX_later_than_5_3=y +CT_LINUX_5_3_or_later=y +CT_LINUX_later_than_4_8=y +CT_LINUX_4_8_or_later=y +CT_LINUX_later_than_3_7=y +CT_LINUX_3_7_or_later=y +CT_LINUX_later_than_3_2=y +CT_LINUX_3_2_or_later=y +CT_LINUX_REQUIRE_3_2_or_later=y +CT_KERNEL_DEP_RSYNC=y +CT_KERNEL_LINUX_VERBOSITY_0=y +# CT_KERNEL_LINUX_VERBOSITY_1 is not set +# CT_KERNEL_LINUX_VERBOSITY_2 is not set +CT_KERNEL_LINUX_VERBOSE_LEVEL=0 +CT_KERNEL_LINUX_INSTALL_CHECK=y +CT_ALL_KERNEL_CHOICES="BARE_METAL LINUX WINDOWS" + +# +# Common kernel options +# +CT_SHARED_LIBS=y +# end of Operating System + +# +# Binary utilities +# +CT_ARCH_BINFMT_ELF=y +CT_BINUTILS_BINUTILS=y +CT_BINUTILS="binutils" +CT_BINUTILS_CHOICE_KSYM="BINUTILS" +CT_BINUTILS_BINUTILS_SHOW=y + +# +# Options for binutils +# +CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS" +CT_BINUTILS_DIR_NAME="binutils" +CT_BINUTILS_USE_GNU=y +# CT_BINUTILS_USE_LINARO is not set +# CT_BINUTILS_USE_ORACLE is not set +CT_BINUTILS_USE="BINUTILS" +CT_BINUTILS_PKG_NAME="binutils" +# CT_BINUTILS_SRC_RELEASE is not set +# CT_BINUTILS_SRC_DEVEL is not set +CT_BINUTILS_SRC_CUSTOM=y +CT_BINUTILS_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/binutils/binutils-2.37" +CT_BINUTILS_PATCH_GLOBAL=y +# CT_BINUTILS_PATCH_BUNDLED is not set +# CT_BINUTILS_PATCH_LOCAL is not set +# CT_BINUTILS_PATCH_BUNDLED_LOCAL is not set +# CT_BINUTILS_PATCH_LOCAL_BUNDLED is not set +# CT_BINUTILS_PATCH_NONE is not set +CT_BINUTILS_PATCH_ORDER="global" +# CT_BINUTILS_VERY_NEW is not set +CT_BINUTILS_V_2_38=y +# CT_BINUTILS_V_2_37 is not set +# CT_BINUTILS_V_2_36 is not set +# CT_BINUTILS_V_2_35 is not set +# CT_BINUTILS_V_2_34 is not set +# CT_BINUTILS_V_2_33 is not set +# CT_BINUTILS_V_2_32 is not set +# CT_BINUTILS_V_2_31 is not set +# CT_BINUTILS_V_2_30 is not set +# CT_BINUTILS_V_2_29 is not set +# CT_BINUTILS_V_2_28 is not set +# CT_BINUTILS_V_2_27 is not set +# CT_BINUTILS_V_2_26 is not set +CT_BINUTILS_VERSION="2.38" +CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)" +CT_BINUTILS_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig" +CT_BINUTILS_later_than_2_30=y +CT_BINUTILS_2_30_or_later=y +CT_BINUTILS_later_than_2_27=y +CT_BINUTILS_2_27_or_later=y +CT_BINUTILS_later_than_2_26=y +CT_BINUTILS_2_26_or_later=y + +# +# GNU binutils +# +CT_BINUTILS_GOLD_SUPPORTS_ARCH=y +CT_BINUTILS_GOLD_SUPPORT=y +CT_BINUTILS_FORCE_LD_BFD_DEFAULT=y +# CT_BINUTILS_LINKER_LD is not set +CT_BINUTILS_LINKER_LD_GOLD=y +CT_BINUTILS_GOLD_INSTALLED=y +CT_BINUTILS_GOLD_THREADS=y +CT_BINUTILS_LINKER_BOTH=y +CT_BINUTILS_LINKERS_LIST="ld,gold" +CT_BINUTILS_LD_WRAPPER=y +CT_BINUTILS_LINKER_DEFAULT="bfd" +CT_BINUTILS_PLUGINS=y +CT_BINUTILS_RELRO=m +CT_BINUTILS_DETERMINISTIC_ARCHIVES=y +CT_BINUTILS_EXTRA_CONFIG_ARRAY="" +# CT_BINUTILS_FOR_TARGET is not set +CT_ALL_BINUTILS_CHOICES="BINUTILS" +# end of Binary utilities + +# +# C-library +# +CT_LIBC_GLIBC=y +# CT_LIBC_MUSL is not set 
+# CT_LIBC_UCLIBC_NG is not set +CT_LIBC="glibc" +CT_LIBC_CHOICE_KSYM="GLIBC" +CT_THREADS="nptl" +CT_LIBC_GLIBC_SHOW=y + +# +# Options for glibc +# +CT_LIBC_GLIBC_PKG_KSYM="GLIBC" +CT_GLIBC_DIR_NAME="glibc" +CT_GLIBC_USE_GNU=y +# CT_GLIBC_USE_ORACLE is not set +CT_GLIBC_USE="GLIBC" +CT_GLIBC_PKG_NAME="glibc" +# CT_GLIBC_SRC_RELEASE is not set +# CT_GLIBC_SRC_DEVEL is not set +CT_GLIBC_SRC_CUSTOM=y +CT_GLIBC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/glibc/glibc-2.36" +CT_GLIBC_PATCH_GLOBAL=y +# CT_GLIBC_PATCH_BUNDLED is not set +# CT_GLIBC_PATCH_LOCAL is not set +# CT_GLIBC_PATCH_BUNDLED_LOCAL is not set +# CT_GLIBC_PATCH_LOCAL_BUNDLED is not set +# CT_GLIBC_PATCH_NONE is not set +CT_GLIBC_PATCH_ORDER="global" +# CT_GLIBC_VERY_NEW is not set +CT_GLIBC_V_2_35=y +# CT_GLIBC_V_2_34 is not set +# CT_GLIBC_V_2_33 is not set +# CT_GLIBC_V_2_32 is not set +# CT_GLIBC_V_2_31 is not set +# CT_GLIBC_V_2_30 is not set +# CT_GLIBC_V_2_29 is not set +# CT_GLIBC_V_2_28 is not set +# CT_GLIBC_V_2_27 is not set +# CT_GLIBC_V_2_26 is not set +# CT_GLIBC_V_2_25 is not set +# CT_GLIBC_V_2_24 is not set +# CT_GLIBC_V_2_23 is not set +# CT_GLIBC_V_2_19 is not set +# CT_GLIBC_V_2_17 is not set +CT_GLIBC_VERSION="2.35" +CT_GLIBC_MIRRORS="$(CT_Mirrors GNU glibc)" +CT_GLIBC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GLIBC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GLIBC_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_GLIBC_SIGNATURE_FORMAT="packed/.sig" +CT_GLIBC_later_than_2_34=y +CT_GLIBC_2_34_or_later=y +CT_GLIBC_later_than_2_32=y +CT_GLIBC_2_32_or_later=y +CT_GLIBC_later_than_2_31=y +CT_GLIBC_2_31_or_later=y +CT_GLIBC_later_than_2_30=y +CT_GLIBC_2_30_or_later=y +CT_GLIBC_later_than_2_29=y +CT_GLIBC_2_29_or_later=y +CT_GLIBC_later_than_2_28=y +CT_GLIBC_2_28_or_later=y +CT_GLIBC_later_than_2_27=y +CT_GLIBC_2_27_or_later=y +CT_GLIBC_later_than_2_26=y +CT_GLIBC_2_26_or_later=y +CT_GLIBC_later_than_2_25=y +CT_GLIBC_2_25_or_later=y +CT_GLIBC_later_than_2_24=y +CT_GLIBC_2_24_or_later=y +CT_GLIBC_later_than_2_23=y +CT_GLIBC_2_23_or_later=y +CT_GLIBC_later_than_2_20=y +CT_GLIBC_2_20_or_later=y +CT_GLIBC_later_than_2_17=y +CT_GLIBC_2_17_or_later=y +CT_GLIBC_later_than_2_14=y +CT_GLIBC_2_14_or_later=y +CT_GLIBC_DEP_KERNEL_HEADERS_VERSION=y +CT_GLIBC_DEP_BINUTILS=y +CT_GLIBC_DEP_GCC=y +CT_GLIBC_DEP_PYTHON=y +CT_GLIBC_BUILD_SSP=y +CT_GLIBC_HAS_LIBIDN_ADDON=y +# CT_GLIBC_USE_LIBIDN_ADDON is not set +CT_GLIBC_NO_SPARC_V8=y +CT_GLIBC_EXTRA_CONFIG_ARRAY="--enable-crypt" +CT_GLIBC_CONFIGPARMS="" +CT_GLIBC_EXTRA_CFLAGS="" +# CT_GLIBC_ENABLE_FORTIFIED_BUILD is not set +# CT_GLIBC_DISABLE_VERSIONING is not set +CT_GLIBC_OLDEST_ABI="" +CT_GLIBC_FORCE_UNWIND=y +# CT_GLIBC_LOCALES is not set +CT_GLIBC_KERNEL_VERSION_NONE=y +# CT_GLIBC_KERNEL_VERSION_AS_HEADERS is not set +# CT_GLIBC_KERNEL_VERSION_CHOSEN is not set +CT_GLIBC_MIN_KERNEL="" +CT_GLIBC_SSP_DEFAULT=y +# CT_GLIBC_SSP_NO is not set +# CT_GLIBC_SSP_YES is not set +# CT_GLIBC_SSP_ALL is not set +# CT_GLIBC_SSP_STRONG is not set +CT_GLIBC_ENABLE_WERROR=y +# CT_GLIBC_ENABLE_COMMON_FLAG is not set +CT_ALL_LIBC_CHOICES="AVR_LIBC BIONIC GLIBC MINGW_W64 MOXIEBOX MUSL NEWLIB NONE UCLIBC_NG" +CT_LIBC_SUPPORT_THREADS_ANY=y +CT_LIBC_SUPPORT_THREADS_NATIVE=y + +# +# Common C library options +# +CT_THREADS_NATIVE=y +# CT_CREATE_LDSO_CONF is not set +CT_LIBC_XLDD=y +# end of C-library + +# +# C compiler +# +CT_CC_CORE_NEEDED=y +CT_CC_SUPPORT_CXX=y +CT_CC_SUPPORT_FORTRAN=y +CT_CC_SUPPORT_ADA=y +CT_CC_SUPPORT_OBJC=y +CT_CC_SUPPORT_OBJCXX=y 
+CT_CC_SUPPORT_GOLANG=y +CT_CC_GCC=y +CT_CC="gcc" +CT_CC_CHOICE_KSYM="GCC" +CT_CC_GCC_SHOW=y + +# +# Options for gcc +# +CT_CC_GCC_PKG_KSYM="GCC" +CT_GCC_DIR_NAME="gcc" +CT_GCC_USE_GNU=y +# CT_GCC_USE_LINARO is not set +CT_GCC_USE="GCC" +CT_GCC_PKG_NAME="gcc" +# CT_GCC_SRC_RELEASE is not set +# CT_GCC_SRC_DEVEL is not set +CT_GCC_SRC_CUSTOM=y +CT_GCC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gcc/gcc-10.3.0" +CT_GCC_PATCH_GLOBAL=y +# CT_GCC_PATCH_BUNDLED is not set +# CT_GCC_PATCH_LOCAL is not set +# CT_GCC_PATCH_BUNDLED_LOCAL is not set +# CT_GCC_PATCH_LOCAL_BUNDLED is not set +# CT_GCC_PATCH_NONE is not set +CT_GCC_PATCH_ORDER="global" +# CT_GCC_VERY_NEW is not set +CT_GCC_V_11=y +# CT_GCC_V_10 is not set +# CT_GCC_V_9 is not set +# CT_GCC_V_8 is not set +# CT_GCC_V_7 is not set +# CT_GCC_V_6 is not set +CT_GCC_VERSION="11.2.0" +CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware gcc/releases/gcc-${CT_GCC_VERSION})" +CT_GCC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GCC_SIGNATURE_FORMAT="" +CT_GCC_later_than_11=y +CT_GCC_11_or_later=y +CT_GCC_later_than_10=y +CT_GCC_10_or_later=y +CT_GCC_later_than_9=y +CT_GCC_9_or_later=y +CT_GCC_later_than_8=y +CT_GCC_8_or_later=y +CT_GCC_later_than_7=y +CT_GCC_7_or_later=y +CT_GCC_later_than_6=y +CT_GCC_6_or_later=y +CT_GCC_REQUIRE_6_or_later=y +CT_GCC_later_than_5=y +CT_GCC_5_or_later=y +CT_GCC_REQUIRE_5_or_later=y +CT_GCC_later_than_4_9=y +CT_GCC_4_9_or_later=y +CT_GCC_REQUIRE_4_9_or_later=y +CT_CC_GCC_ENABLE_PLUGINS=y +CT_CC_GCC_GOLD=y +CT_CC_GCC_HAS_LIBMPX=y +CT_CC_GCC_ENABLE_CXX_FLAGS="" +CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" +CT_CC_GCC_EXTRA_CONFIG_ARRAY="--enable-gnu-indirect-function --with-stage1-ldflags='-Wl,-z,relro,-z,now' --with-boot-ldflags='-Wl,-z,relro,-z,now' --with-tune=generic --with-arch=x86-64 --disable-multilib --with-gnu-as --with-gnu-ld --enable-shared --enable-poison-system-directories --enable-symvers=gnu --disable-bootstrap --enable-default-pie --libdir=\"${CT_PREFIX_DIR}/lib64\" --with-build-time-tools=\"${CT_PREFIX_DIR}/${CT_TARGET}/bin\"" +CT_CC_GCC_STATIC_LIBSTDCXX=y +# CT_CC_GCC_SYSTEM_ZLIB is not set +CT_CC_GCC_CONFIG_TLS=m + +# +# Optimisation features +# +CT_CC_GCC_USE_GRAPHITE=y +CT_CC_GCC_USE_LTO=y +CT_CC_GCC_LTO_ZSTD=m + +# +# Settings for libraries running on target +# +CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y +# CT_CC_GCC_LIBMUDFLAP is not set +# CT_CC_GCC_LIBGOMP is not set +# CT_CC_GCC_LIBSSP is not set +# CT_CC_GCC_LIBQUADMATH is not set +# CT_CC_GCC_LIBSANITIZER is not set +CT_CC_GCC_LIBMPX=y + +# +# Misc. obscure options. 
+# +CT_CC_CXA_ATEXIT=y +CT_CC_GCC_TM_CLONE_REGISTRY=m +# CT_CC_GCC_DISABLE_PCH is not set +CT_CC_GCC_SJLJ_EXCEPTIONS=m +CT_CC_GCC_LDBL_128=m +# CT_CC_GCC_BUILD_ID is not set +# CT_CC_GCC_LNK_HASH_STYLE_DEFAULT is not set +# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set +# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set +CT_CC_GCC_LNK_HASH_STYLE_BOTH=y +CT_CC_GCC_LNK_HASH_STYLE="both" +CT_CC_GCC_DEC_FLOATS_AUTO=y +# CT_CC_GCC_DEC_FLOATS_BID is not set +# CT_CC_GCC_DEC_FLOATS_DPD is not set +# CT_CC_GCC_DEC_FLOATS_NO is not set +CT_CC_GCC_DEC_FLOATS="" +CT_ALL_CC_CHOICES="GCC" + +# +# Additional supported languages: +# +CT_CC_LANG_CXX=y +# CT_CC_LANG_FORTRAN is not set +# CT_CC_LANG_ADA is not set +# CT_CC_LANG_OBJC is not set +# CT_CC_LANG_OBJCXX is not set +# CT_CC_LANG_GOLANG is not set +CT_CC_LANG_OTHERS="" +# end of C compiler + +# +# Debug facilities +# +# CT_DEBUG_DUMA is not set +CT_DEBUG_GDB=y +CT_DEBUG_GDB_PKG_KSYM="GDB" +CT_GDB_DIR_NAME="gdb" +CT_GDB_PKG_NAME="gdb" +# CT_GDB_SRC_RELEASE is not set +# CT_GDB_SRC_DEVEL is not set +CT_GDB_SRC_CUSTOM=y +CT_GDB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gdb/gdb-12.1" +CT_GDB_PATCH_GLOBAL=y +# CT_GDB_PATCH_BUNDLED is not set +# CT_GDB_PATCH_LOCAL is not set +# CT_GDB_PATCH_BUNDLED_LOCAL is not set +# CT_GDB_PATCH_LOCAL_BUNDLED is not set +# CT_GDB_PATCH_NONE is not set +CT_GDB_PATCH_ORDER="global" +# CT_GDB_VERY_NEW is not set +CT_GDB_V_11=y +# CT_GDB_V_10 is not set +# CT_GDB_V_9 is not set +# CT_GDB_V_8_3 is not set +CT_GDB_VERSION="11.2" +CT_GDB_MIRRORS="$(CT_Mirrors GNU gdb) $(CT_Mirrors sourceware gdb/releases)" +CT_GDB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GDB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GDB_SIGNATURE_FORMAT="" +CT_GDB_later_than_11=y +CT_GDB_11_or_later=y +CT_GDB_later_than_10=y +CT_GDB_10_or_later=y +CT_GDB_later_than_8_3=y +CT_GDB_8_3_or_later=y +CT_GDB_later_than_8_0=y +CT_GDB_8_0_or_later=y +CT_GDB_later_than_7_12=y +CT_GDB_7_12_or_later=y +CT_GDB_later_than_7_11=y +CT_GDB_7_11_or_later=y +CT_GDB_CROSS=y +# CT_GDB_CROSS_STATIC is not set +# CT_GDB_CROSS_SIM is not set +CT_GDB_CROSS_PYTHON=y +CT_GDB_CROSS_PYTHON_BINARY="" +CT_GDB_CROSS_EXTRA_CONFIG_ARRAY="" +# CT_GDB_NATIVE is not set +CT_GDB_GDBSERVER=y +# CT_GDB_NATIVE_BUILD_IPA_LIB is not set +# CT_GDB_NATIVE_STATIC is not set +# CT_GDB_NATIVE_STATIC_LIBSTDCXX is not set +CT_GDB_GDBSERVER_TOPLEVEL=y +# CT_DEBUG_LTRACE is not set +# CT_DEBUG_STRACE is not set +CT_ALL_DEBUG_CHOICES="DUMA GDB LTRACE STRACE" +# end of Debug facilities + +# +# Companion libraries +# +# CT_COMPLIBS_CHECK is not set +# CT_COMP_LIBS_CLOOG is not set +CT_COMP_LIBS_EXPAT=y +CT_COMP_LIBS_EXPAT_PKG_KSYM="EXPAT" +CT_EXPAT_DIR_NAME="expat" +CT_EXPAT_PKG_NAME="expat" +# CT_EXPAT_SRC_RELEASE is not set +# CT_EXPAT_SRC_DEVEL is not set +CT_EXPAT_SRC_CUSTOM=y +CT_EXPAT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/expat/expat-2.5.0" +CT_EXPAT_PATCH_GLOBAL=y +# CT_EXPAT_PATCH_BUNDLED is not set +# CT_EXPAT_PATCH_LOCAL is not set +# CT_EXPAT_PATCH_BUNDLED_LOCAL is not set +# CT_EXPAT_PATCH_LOCAL_BUNDLED is not set +# CT_EXPAT_PATCH_NONE is not set +CT_EXPAT_PATCH_ORDER="global" +CT_EXPAT_VERY_NEW=y +# CT_EXPAT_V_2_4 is not set +CT_EXPAT_VERSION="new" +CT_EXPAT_MIRRORS="http://downloads.sourceforge.net/project/expat/expat/${CT_EXPAT_VERSION} https://github.com/libexpat/libexpat/releases/download/R_${CT_EXPAT_VERSION//./_}" +CT_EXPAT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" 
+CT_EXPAT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_EXPAT_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2 .tar.gz" +CT_EXPAT_SIGNATURE_FORMAT="" +CT_COMP_LIBS_GETTEXT=y +CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT" +CT_GETTEXT_DIR_NAME="gettext" +CT_GETTEXT_PKG_NAME="gettext" +# CT_GETTEXT_SRC_RELEASE is not set +# CT_GETTEXT_SRC_DEVEL is not set +CT_GETTEXT_SRC_CUSTOM=y +CT_GETTEXT_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gettext/gettext-0.21" +CT_GETTEXT_PATCH_GLOBAL=y +# CT_GETTEXT_PATCH_BUNDLED is not set +# CT_GETTEXT_PATCH_LOCAL is not set +# CT_GETTEXT_PATCH_BUNDLED_LOCAL is not set +# CT_GETTEXT_PATCH_LOCAL_BUNDLED is not set +# CT_GETTEXT_PATCH_NONE is not set +CT_GETTEXT_PATCH_ORDER="global" +# CT_GETTEXT_VERY_NEW is not set +CT_GETTEXT_V_0_21=y +# CT_GETTEXT_V_0_20_1 is not set +# CT_GETTEXT_V_0_19_8_1 is not set +CT_GETTEXT_VERSION="0.21" +CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)" +CT_GETTEXT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig" +CT_GETTEXT_0_21_or_later=y +CT_GETTEXT_0_21_or_older=y +CT_GETTEXT_INCOMPATIBLE_WITH_UCLIBC_NG=y + +# +# This version of gettext is not compatible with uClibc-NG. Select +# + +# +# a different version if uClibc-NG is used on the target or (in a +# + +# +# Canadian cross build) on the host. +# +CT_COMP_LIBS_GMP=y +CT_COMP_LIBS_GMP_PKG_KSYM="GMP" +CT_GMP_DIR_NAME="gmp" +CT_GMP_PKG_NAME="gmp" +# CT_GMP_SRC_RELEASE is not set +# CT_GMP_SRC_DEVEL is not set +CT_GMP_SRC_CUSTOM=y +CT_GMP_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/gmp/gmp-6.2.1" +CT_GMP_PATCH_GLOBAL=y +# CT_GMP_PATCH_BUNDLED is not set +# CT_GMP_PATCH_LOCAL is not set +# CT_GMP_PATCH_BUNDLED_LOCAL is not set +# CT_GMP_PATCH_LOCAL_BUNDLED is not set +# CT_GMP_PATCH_NONE is not set +CT_GMP_PATCH_ORDER="global" +# CT_GMP_VERY_NEW is not set +CT_GMP_V_6_2=y +# CT_GMP_V_6_1 is not set +CT_GMP_VERSION="6.2.1" +CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)" +CT_GMP_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2" +CT_GMP_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_ISL=y +CT_COMP_LIBS_ISL_PKG_KSYM="ISL" +CT_ISL_DIR_NAME="isl" +CT_ISL_PKG_NAME="isl" +# CT_ISL_SRC_RELEASE is not set +# CT_ISL_SRC_DEVEL is not set +CT_ISL_SRC_CUSTOM=y +CT_ISL_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/isl/isl-0.24" +CT_ISL_PATCH_GLOBAL=y +# CT_ISL_PATCH_BUNDLED is not set +# CT_ISL_PATCH_LOCAL is not set +# CT_ISL_PATCH_BUNDLED_LOCAL is not set +# CT_ISL_PATCH_LOCAL_BUNDLED is not set +# CT_ISL_PATCH_NONE is not set +CT_ISL_PATCH_ORDER="global" +# CT_ISL_VERY_NEW is not set +CT_ISL_V_0_24=y +# CT_ISL_V_0_23 is not set +# CT_ISL_V_0_22 is not set +# CT_ISL_V_0_21 is not set +# CT_ISL_V_0_20 is not set +# CT_ISL_V_0_19 is not set +# CT_ISL_V_0_18 is not set +# CT_ISL_V_0_17 is not set +# CT_ISL_V_0_16 is not set +# CT_ISL_V_0_15 is not set +CT_ISL_VERSION="0.24" +CT_ISL_MIRRORS="https://libisl.sourceforge.io" +CT_ISL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_ISL_SIGNATURE_FORMAT="" +CT_ISL_later_than_0_18=y +CT_ISL_0_18_or_later=y +CT_ISL_later_than_0_15=y +CT_ISL_0_15_or_later=y +# CT_COMP_LIBS_LIBELF is not set +CT_COMP_LIBS_LIBICONV=y 
+CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV" +CT_LIBICONV_DIR_NAME="libiconv" +CT_LIBICONV_PKG_NAME="libiconv" +# CT_LIBICONV_SRC_RELEASE is not set +# CT_LIBICONV_SRC_DEVEL is not set +CT_LIBICONV_SRC_CUSTOM=y +CT_LIBICONV_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libiconv/libiconv-1.16" +CT_LIBICONV_PATCH_GLOBAL=y +# CT_LIBICONV_PATCH_BUNDLED is not set +# CT_LIBICONV_PATCH_LOCAL is not set +# CT_LIBICONV_PATCH_BUNDLED_LOCAL is not set +# CT_LIBICONV_PATCH_LOCAL_BUNDLED is not set +# CT_LIBICONV_PATCH_NONE is not set +CT_LIBICONV_PATCH_ORDER="global" +# CT_LIBICONV_VERY_NEW is not set +CT_LIBICONV_V_1_16=y +# CT_LIBICONV_V_1_15 is not set +CT_LIBICONV_VERSION="1.16" +CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)" +CT_LIBICONV_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz" +CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_MPC=y +CT_COMP_LIBS_MPC_PKG_KSYM="MPC" +CT_MPC_DIR_NAME="mpc" +CT_MPC_PKG_NAME="mpc" +# CT_MPC_SRC_RELEASE is not set +# CT_MPC_SRC_DEVEL is not set +CT_MPC_SRC_CUSTOM=y +CT_MPC_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/libmpc/mpc-1.3.1" +CT_MPC_PATCH_GLOBAL=y +# CT_MPC_PATCH_BUNDLED is not set +# CT_MPC_PATCH_LOCAL is not set +# CT_MPC_PATCH_BUNDLED_LOCAL is not set +# CT_MPC_PATCH_LOCAL_BUNDLED is not set +# CT_MPC_PATCH_NONE is not set +CT_MPC_PATCH_ORDER="global" +# CT_MPC_VERY_NEW is not set +CT_MPC_V_1_2=y +# CT_MPC_V_1_1 is not set +# CT_MPC_V_1_0 is not set +CT_MPC_VERSION="1.2.1" +CT_MPC_MIRRORS="http://www.multiprecision.org/downloads $(CT_Mirrors GNU mpc)" +CT_MPC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_FORMATS=".tar.gz" +CT_MPC_SIGNATURE_FORMAT="packed/.sig" +CT_MPC_later_than_1_1_0=y +CT_MPC_1_1_0_or_later=y +CT_COMP_LIBS_MPFR=y +CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR" +CT_MPFR_DIR_NAME="mpfr" +CT_MPFR_PKG_NAME="mpfr" +# CT_MPFR_SRC_RELEASE is not set +# CT_MPFR_SRC_DEVEL is not set +CT_MPFR_SRC_CUSTOM=y +CT_MPFR_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/mpfr/mpfr-4.1.0" +CT_MPFR_PATCH_GLOBAL=y +# CT_MPFR_PATCH_BUNDLED is not set +# CT_MPFR_PATCH_LOCAL is not set +# CT_MPFR_PATCH_BUNDLED_LOCAL is not set +# CT_MPFR_PATCH_LOCAL_BUNDLED is not set +# CT_MPFR_PATCH_NONE is not set +CT_MPFR_PATCH_ORDER="global" +# CT_MPFR_VERY_NEW is not set +CT_MPFR_V_4_1=y +# CT_MPFR_V_4_0 is not set +# CT_MPFR_V_3_1 is not set +CT_MPFR_VERSION="4.1.0" +CT_MPFR_MIRRORS="http://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)" +CT_MPFR_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip" +CT_MPFR_SIGNATURE_FORMAT="packed/.asc" +CT_MPFR_later_than_4_0_0=y +CT_MPFR_4_0_0_or_later=y +CT_COMP_LIBS_NCURSES=y +CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES" +CT_NCURSES_DIR_NAME="ncurses" +CT_NCURSES_PKG_NAME="ncurses" +# CT_NCURSES_SRC_RELEASE is not set +# CT_NCURSES_SRC_DEVEL is not set +CT_NCURSES_SRC_CUSTOM=y +CT_NCURSES_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/ncurses/ncurses-6.4" +CT_NCURSES_PATCH_GLOBAL=y +# CT_NCURSES_PATCH_BUNDLED is not set +# CT_NCURSES_PATCH_LOCAL is not set +# CT_NCURSES_PATCH_BUNDLED_LOCAL is not set +# CT_NCURSES_PATCH_LOCAL_BUNDLED is not set +# CT_NCURSES_PATCH_NONE is not set +CT_NCURSES_PATCH_ORDER="global" +# CT_NCURSES_VERY_NEW is not set +CT_NCURSES_V_6_2=y +# CT_NCURSES_V_6_1 is not set +# 
CT_NCURSES_V_6_0 is not set
+CT_NCURSES_VERSION="6.2"
+CT_NCURSES_MIRRORS="https://invisible-mirror.net/archives/ncurses $(CT_Mirrors GNU ncurses)"
+CT_NCURSES_ARCHIVE_FILENAME="@{pkg_name}-@{version}"
+CT_NCURSES_ARCHIVE_DIRNAME="@{pkg_name}-@{version}"
+CT_NCURSES_ARCHIVE_FORMATS=".tar.gz"
+CT_NCURSES_SIGNATURE_FORMAT="packed/.sig"
+CT_NCURSES_NEW_ABI=y
+CT_NCURSES_HOST_CONFIG_ARGS=""
+CT_NCURSES_HOST_DISABLE_DB=y
+CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100"
+CT_NCURSES_TARGET_CONFIG_ARGS=""
+# CT_NCURSES_TARGET_DISABLE_DB is not set
+CT_NCURSES_TARGET_FALLBACKS=""
+CT_COMP_LIBS_ZLIB=y
+CT_COMP_LIBS_ZLIB_PKG_KSYM="ZLIB"
+CT_ZLIB_DIR_NAME="zlib"
+CT_ZLIB_PKG_NAME="zlib"
+# CT_ZLIB_SRC_RELEASE is not set
+# CT_ZLIB_SRC_DEVEL is not set
+CT_ZLIB_SRC_CUSTOM=y
+CT_ZLIB_CUSTOM_LOCATION="/usr1/yocto-embedded-tools/cross_tools/open_source/zlib/zlib-1.2.13"
+CT_ZLIB_PATCH_GLOBAL=y
+# CT_ZLIB_PATCH_BUNDLED is not set
+# CT_ZLIB_PATCH_LOCAL is not set
+# CT_ZLIB_PATCH_BUNDLED_LOCAL is not set
+# CT_ZLIB_PATCH_LOCAL_BUNDLED is not set
+# CT_ZLIB_PATCH_NONE is not set
+CT_ZLIB_PATCH_ORDER="global"
+# CT_ZLIB_VERY_NEW is not set
+CT_ZLIB_V_1_2_12=y
+CT_ZLIB_VERSION="1.2.12"
+CT_ZLIB_MIRRORS="http://downloads.sourceforge.net/project/libpng/zlib/${CT_ZLIB_VERSION} https://www.zlib.net/"
+CT_ZLIB_ARCHIVE_FILENAME="@{pkg_name}-@{version}"
+CT_ZLIB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}"
+CT_ZLIB_ARCHIVE_FORMATS=".tar.xz .tar.gz"
+CT_ZLIB_SIGNATURE_FORMAT="packed/.asc"
+CT_ALL_COMP_LIBS_CHOICES="CLOOG EXPAT GETTEXT GMP GNUPRUMCU ISL LIBELF LIBICONV MPC MPFR NCURSES NEWLIB_NANO PICOLIBC ZLIB"
+CT_LIBICONV_NEEDED=y
+CT_GETTEXT_NEEDED=y
+CT_GMP_NEEDED=y
+CT_MPFR_NEEDED=y
+CT_ISL_NEEDED=y
+CT_MPC_NEEDED=y
+CT_EXPAT_NEEDED=y
+CT_NCURSES_NEEDED=y
+CT_ZLIB_NEEDED=y
+CT_LIBICONV=y
+CT_GETTEXT=y
+CT_GMP=y
+CT_MPFR=y
+CT_ISL=y
+CT_MPC=y
+CT_EXPAT=y
+CT_NCURSES=y
+CT_ZLIB=y
+# end of Companion libraries
+
+#
+# Companion tools
+#
+# CT_COMP_TOOLS_FOR_HOST is not set
+# CT_COMP_TOOLS_AUTOCONF is not set
+# CT_COMP_TOOLS_AUTOMAKE is not set
+# CT_COMP_TOOLS_BISON is not set
+# CT_COMP_TOOLS_DTC is not set
+# CT_COMP_TOOLS_LIBTOOL is not set
+# CT_COMP_TOOLS_M4 is not set
+# CT_COMP_TOOLS_MAKE is not set
+CT_ALL_COMP_TOOLS_CHOICES="AUTOCONF AUTOMAKE BISON DTC LIBTOOL M4 MAKE"
+# end of Companion tools
+
+#
+# Test suite
+#
+# CT_TEST_SUITE_GCC is not set
+# end of Test suite
diff --git a/cross_tools/prepare.sh b/cross_tools/prepare.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9097e0f4562089aac90d3deb09fa12a381020784
--- /dev/null
+++ b/cross_tools/prepare.sh
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+# Remove any previously fetched copies of the given directories.
+function delete_dir() {
+    while [ $# != 0 ] ; do
+        [ -n "$1" ] && rm -rf ./$1 ; shift; done
+}
+
+# Unpack a package tarball and, for most packages, apply the patches listed
+# in its RPM spec, then any matching patches shipped in ${OE_PATCH_DIR};
+# isl and zlib are only unpacked.
+function do_patch() {
+    pushd $1
+    if [ $1 = "isl" ];then
+        tar xf $1-0.24.tar.gz
+    elif [ $1 = "zlib" ];then
+        tar xf *.tar.*
+    else
+        PKG=$(echo *.tar.*)
+        echo "$1: do_unpack of $PKG..."
+        tar xf *.tar.*
+        echo "make patchlist of $1..."
+        cat *.spec | grep "Patch" | grep -v "#" | grep "\.patch" | awk -F ":" '{print $2}' > $1-patchlist
+        ls ${OE_PATCH_DIR}/ | grep "^$1" > $1-patchlist-oe || true
+        pushd ${PKG%%.tar.*}
+        for i in `cat ../$1-patchlist`
+        do
+            echo "----------------apply patch $i:"
+            patch -p1 < ../$i
+        done
+        for i in `cat ../$1-patchlist-oe`
+        do
+            echo "----------------apply patch ${OE_PATCH_DIR}/$i:"
+            patch -p1 < ${OE_PATCH_DIR}/$i
+        done
+        popd
+    fi
+    popd
+    echo "------------do_patch for $1 done!"
+}
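+
+# download_and_patch: for each package name given, shallow-clone
+# https://gitee.com/src-openeuler/<name> at $COMMON_BRANCH and run do_patch
+# on the result, e.g. `download_and_patch $GCC $GLIBC`. The branch and
+# package-name variables are presumably defined in configs/config.xml,
+# which main() sources below.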
+}
+
+function download_and_patch() {
+    while [ $# != 0 ] ; do
+        [ -n "$1" ] && echo "Download $1" && git clone -b $COMMON_BRANCH https://gitee.com/src-openeuler/$1.git --depth 1 && do_patch $1; shift;
+    done
+}
+
+function do_prepare() {
+    [ ! -d "$LIB_PATH" ] && mkdir $LIB_PATH
+    pushd $LIB_PATH
+    delete_dir $KERNEL $GCC $GLIBC $MUSLC $BINUTILS $GMP $MPC $MPFR $ISL $EXPAT $GETTEXT $NCURSES $ZLIB $LIBICONV $GDB
+    git clone -b $KERNEL_BRANCH https://gitee.com/openeuler/kernel.git --depth 1
+    git clone -b $MUSLC_BRANCH https://gitee.com/src-openeuler/musl.git --depth 1 && do_patch musl;
+    download_and_patch $GCC $GLIBC $BINUTILS $GMP $MPC $MPFR $ISL $EXPAT $NCURSES $ZLIB $GDB
+    # The LIBICONV and GETTEXT dirs must exist but may stay empty; ct-ng skips them when building in our openEuler env.
+    mkdir -p $LIB_PATH/$LIBICONV/$LIBICONV_DIR
+    mkdir -p $LIB_PATH/$GETTEXT/$GETTEXT_DIR
+    popd
+}
+
+function update_feature() {
+    # Change GLIBC_DYNAMIC_LINKER to use lib64/xxx.ld for arm64 and lib64/lp64d/xxx.ld for riscv64
+    sed -i "s#^\#define GLIBC_DYNAMIC_LINKER.*#\#undef STANDARD_STARTFILE_PREFIX_2\n\#define STANDARD_STARTFILE_PREFIX_2 \"/usr/lib64/\"\n\#define GLIBC_DYNAMIC_LINKER \"/lib%{mabi=lp64:64}%{mabi=ilp32:ilp32}/ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1\"#g" $LIB_PATH/$GCC/$GCC_DIR/gcc/config/aarch64/aarch64-linux.h
+    sed -i "s#^\#define GLIBC_DYNAMIC_LINKER.*#\#define GLIBC_DYNAMIC_LINKER \"/lib64/lp64d/ld-linux-riscv\" XLEN_SPEC \"-\" ABI_SPEC \".so.1\"#g" $LIB_PATH/$GCC/$GCC_DIR/gcc/config/riscv/linux.h
+    sed -i "s#^\#define MUSL_DYNAMIC_LINKER.*#\#define MUSL_DYNAMIC_LINKER \"/lib%{mabi=lp64:64}%{mabi=ilp32:ilp32}/ld-musl-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1\"#g" $LIB_PATH/$GCC/$GCC_DIR/gcc/config/aarch64/aarch64-linux.h
+    sed -i "s#^\#define MUSL_DYNAMIC_LINKER.*#\#define MUSL_DYNAMIC_LINKER \"/lib64/lp64d/ld-musl-riscv\" XLEN_SPEC MUSL_ABI_SUFFIX \".so.1\"#g" $LIB_PATH/$GCC/$GCC_DIR/gcc/config/riscv/linux.h
+
+    # Change libstdc++.so option
+    sed -i "s#^\\t-o \\$\@.*#\\t-Wl,-z,relro,-z,now,-z,noexecstack -Wtrampolines -o \$\@#g" $LIB_PATH/$GCC/$GCC_DIR/libstdc++-v3/src/Makefile.in
+}
+
+function update_config() {
+    cp $SRC_DIR/configs/config_* $WORK_DIR/
+    sed -i "s#^CT_LINUX_CUSTOM_LOCATION.*#CT_LINUX_CUSTOM_LOCATION=\"$LIB_PATH/kernel\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_BINUTILS_CUSTOM_LOCATION.*#CT_BINUTILS_CUSTOM_LOCATION=\"$LIB_PATH/$BINUTILS/$BINUTILS_DIR\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_GLIBC_CUSTOM_LOCATION.*#CT_GLIBC_CUSTOM_LOCATION=\"$LIB_PATH/$GLIBC/$GLIBC_DIR\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_MUSL_CUSTOM_LOCATION.*#CT_MUSL_CUSTOM_LOCATION=\"$LIB_PATH/$MUSLC/$MUSLC_DIR\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_GCC_CUSTOM_LOCATION.*#CT_GCC_CUSTOM_LOCATION=\"$LIB_PATH/$GCC/$GCC_DIR\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_GDB_CUSTOM_LOCATION.*#CT_GDB_CUSTOM_LOCATION=\"$LIB_PATH/$GDB/$GDB_DIR\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_GMP_CUSTOM_LOCATION.*#CT_GMP_CUSTOM_LOCATION=\"$LIB_PATH/$GMP/$GMP_DIR\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_ISL_CUSTOM_LOCATION.*#CT_ISL_CUSTOM_LOCATION=\"$LIB_PATH/$ISL/$ISL_DIR\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_MPC_CUSTOM_LOCATION.*#CT_MPC_CUSTOM_LOCATION=\"$LIB_PATH/$MPC/$MPC_DIR\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_MPFR_CUSTOM_LOCATION.*#CT_MPFR_CUSTOM_LOCATION=\"$LIB_PATH/$MPFR/$MPFR_DIR\"#g" $WORK_DIR/config_*
+    sed -i "s#^CT_EXPAT_CUSTOM_LOCATION.*#CT_EXPAT_CUSTOM_LOCATION=\"$LIB_PATH/$EXPAT/$EXPAT_DIR\"#g" $WORK_DIR/config_*
+    sed -i 
"s#^CT_LIBICONV_CUSTOM_LOCATION.*#CT_LIBICONV_CUSTOM_LOCATION=\"$LIB_PATH/$LIBICONV/$LIBICONV_DIR\"#g" $WORK_DIR/config_* + sed -i "s#^CT_GETTEXT_CUSTOM_LOCATION.*#CT_GETTEXT_CUSTOM_LOCATION=\"$LIB_PATH/$GETTEXT/$GETTEXT_DIR\"#g" $WORK_DIR/config_* + sed -i "s#^CT_NCURSES_CUSTOM_LOCATION.*#CT_NCURSES_CUSTOM_LOCATION=\"$LIB_PATH/$NCURSES/$NCURSES_DIR\"#g" $WORK_DIR/config_* + sed -i "s#^CT_ZLIB_CUSTOM_LOCATION.*#CT_ZLIB_CUSTOM_LOCATION=\"$LIB_PATH/$ZLIB/$ZLIB_DIR\"#g" $WORK_DIR/config_* +} + +usage() +{ + echo -e "Tip: sh "$THIS_SCRIPT" \n" +} + +check_use() +{ + if [ -n "$BASH_SOURCE" ]; then + THIS_SCRIPT="$BASH_SOURCE" + elif [ -n "$ZSH_NAME" ]; then + THIS_SCRIPT="$0" + else + THIS_SCRIPT="$(pwd)/prepare.sh" + if [ ! -e "$THIS_SCRIPT" ]; then + echo "Error: $THIS_SCRIPT doesn't exist!" + return 1 + fi + fi + + if [ "$0" != "$THIS_SCRIPT" ]; then + echo "Error: This script cannot be sourced. Please run as 'sh $THIS_SCRIPT'" >&2 + return 1 + fi +} + +main() +{ + usage + check_use || return 1 + set -e + WORK_DIR="$1" + SRC_DIR="$(cd $(dirname $0)/;pwd)" + SRC_DIR="$(realpath ${SRC_DIR})" + if [[ -z "${WORK_DIR}" ]];then + WORK_DIR=$SRC_DIR + echo "use default work dir: $WORK_DIR" + fi + WORK_DIR="$(realpath ${WORK_DIR})" + source $SRC_DIR/configs/config.xml + OE_PATCH_DIR="$SRC_DIR/patches" + readonly LIB_PATH="$WORK_DIR/open_source" + + do_prepare + update_feature + update_config + + cd $WORK_DIR + echo "Prepare done! Now you can run: (not in root please)" + echo "'cp config_arm32 .config && ct-ng build' for build arm" + echo "'cp config_aarch64 .config && ct-ng build' for build arm64" + echo "'cp config_x86_64 .config && ct-ng build' for build x86_64" + echo "'cp config_riscv64 .config && ct-ng build' for build riscv64" + echo "'cp config_aarch64-musl .config && ct-ng build' for build muslc_aarch64" +} + +main "$@" diff --git a/dockerfile/Dockerfile b/dockerfile/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..abd508e277af8820cf380bbd6afc7e96ddc434f6 --- /dev/null +++ b/dockerfile/Dockerfile @@ -0,0 +1,54 @@ +# base image +FROM openeuler/openeuler:22.03-lts + +# MAINTAINER +MAINTAINER harvey + +# yum install +RUN set -eux; \ + yum -y install git java tar cmake gperf sqlite-devel libffi-devel xz-devel zlib zlib-devel openssl-devel bzip2-devel ncurses-devel readline-devel libpcap-devel parted autoconf-archive chrpath gcc-c++ patch rpm-build flex autoconf automake m4 bison bc libtool gettext-devel createrepo_c git net-tools wget sudo hostname rpcgen texinfo python meson dosfstools mtools libmpc-devel gmp-devel ninja-build numactl-devel make + +# gcc install +ARG tools_dir=/usr1/tools +ARG gcc_install_dir=/usr1/openeuler/gcc + +# change dir to workdir and star install +WORKDIR ${tools_dir} +RUN wget https://repo.openeuler.org/openEuler-22.03-LTS/EPOL/main/x86_64/Packages/gcc-cross-1.0-0.oe2203.x86_64.rpm -P ${tools_dir} +RUN rpm2cpio ${tools_dir}/gcc-cross-1.0-0.oe2203.x86_64.rpm | cpio -id + +# change dir to workdir and unpack +WORKDIR ${gcc_install_dir} +RUN tar -xf ${tools_dir}/tmp/openeuler_gcc_arm32le.tar.gz +RUN find ./openeuler_gcc_arm32le -type d | xargs chmod go+x +RUN chmod go+r ./openeuler_gcc_arm32le -R +RUN chmod -R 755 /usr1/openeuler/gcc/openeuler_gcc_arm32le/bin + +RUN tar -xf ${tools_dir}/tmp/openeuler_gcc_arm64le.tar.gz +RUN find ./openeuler_gcc_arm64le -type d | xargs chmod go+x +RUN chmod go+r ./openeuler_gcc_arm64le -R +RUN chmod -R 755 /usr1/openeuler/gcc/openeuler_gcc_arm64le/bin +RUN find ./ | xargs chmod go+x + + +# clean install 
package +WORKDIR /usr1 +RUN rm -rf tools + +ARG VERSION=4.3 +ARG user=openeuler +ARG group=openeuler +ARG uid=1000 +ARG gid=1000 +ARG AGENT_WORKDIR=/home/agent + +# add build user +RUN groupadd -g ${gid} ${group} +RUN useradd -c "openeuler" -d /home/${user} -u ${uid} -g ${gid} -m ${user} +RUN echo "${user} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers + +# modify /usr1 owner +RUN chown -R openeuler:users /usr1 + +# modify default configs +RUN sed -i 's/TMOUT=300/TMOUT=/g' /etc/bashrc diff --git a/dockerfile/Dockerfile_CI b/dockerfile/Dockerfile_CI new file mode 100644 index 0000000000000000000000000000000000000000..5ad68aa392af18b398a1b535fbe4e0664adae601 --- /dev/null +++ b/dockerfile/Dockerfile_CI @@ -0,0 +1,73 @@ +# base image +FROM openeuler/openeuler:22.03-lts + +# MAINTAINER +MAINTAINER harvey + +# yum install +RUN set -eux; \ + yum -y install git java tar cmake gperf sqlite-devel libffi-devel xz-devel zlib zlib-devel openssl-devel bzip2-devel ncurses-devel readline-devel libpcap-devel parted autoconf-archive chrpath gcc-c++ patch rpm-build flex autoconf automake m4 bison bc libtool gettext-devel createrepo_c git net-tools wget sudo hostname rpcgen texinfo python meson dosfstools mtools libmpc-devel gmp-devel ninja-build numactl-devel make + +# gcc install +ARG tools_dir=/usr1/tools +ARG gcc_install_dir=/usr1/openeuler/gcc + +# change dir to workdir and star install +WORKDIR ${tools_dir} +RUN wget https://repo.openeuler.org/openEuler-22.03-LTS/EPOL/main/x86_64/Packages/gcc-cross-1.0-0.oe2203.x86_64.rpm -P ${tools_dir} +RUN rpm2cpio ${tools_dir}/gcc-cross-1.0-0.oe2203.x86_64.rpm | cpio -id + +# change dir to workdir and unpack +WORKDIR ${gcc_install_dir} +RUN tar -xf ${tools_dir}/tmp/openeuler_gcc_arm32le.tar.gz +RUN find ./openeuler_gcc_arm32le -type d | xargs chmod go+x +RUN chmod go+r ./openeuler_gcc_arm32le -R +RUN chmod -R 755 /usr1/openeuler/gcc/openeuler_gcc_arm32le/bin + +RUN tar -xf ${tools_dir}/tmp/openeuler_gcc_arm64le.tar.gz +RUN find ./openeuler_gcc_arm64le -type d | xargs chmod go+x +RUN chmod go+r ./openeuler_gcc_arm64le -R +RUN chmod -R 755 /usr1/openeuler/gcc/openeuler_gcc_arm64le/bin +RUN find ./ | xargs chmod go+x + + +# clean install package +WORKDIR /usr1 +RUN rm -rf tools + +ARG VERSION=4.3 +ARG user=jenkins +ARG group=jenkins +ARG uid=1000 +ARG gid=1000 +ARG AGENT_WORKDIR=/home/${user}/agent + +RUN curl --create-dirs -fsSLo /usr/share/jenkins/agent.jar https://repo.jenkins-ci.org/public/org/jenkins-ci/main/remoting/${VERSION}/remoting-${VERSION}.jar \ + && chmod 755 /usr/share/jenkins \ + && chmod 644 /usr/share/jenkins/agent.jar \ + && ln -sf /usr/share/jenkins/agent.jar /usr/share/jenkins/slave.jar + +RUN curl --create-dirs -fsSLo /usr/local/bin/jenkins-agent http://121.36.53.23/AdoptOpenJDK/jenkins-agent +#COPY jenkins-agent /usr/local/bin/jenkins-agent + +RUN chmod a+rx /usr/local/bin/jenkins-agent \ + && ln -s /usr/local/bin/jenkins-agent /usr/local/bin/jenkins-slave + +RUN groupadd -g ${gid} ${group} +RUN useradd -c "Jenkins user" -d /home/${user} -u ${uid} -g ${gid} -m ${user} +RUN echo "${user} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers + + +USER ${user} +ENV AGENT_WORKDIR=${AGENT_WORKDIR} +RUN mkdir /home/${user}/.jenkins && mkdir -p ${AGENT_WORKDIR} + +VOLUME /home/${user}/.jenkins +VOLUME ${AGENT_WORKDIR} +WORKDIR ${AGENT_WORKDIR} + +# add build user +RUN sudo useradd -m openeuler + + +ENTRYPOINT ["jenkins-agent"] diff --git a/dockerfile/qemu_test_dockerfile b/dockerfile/qemu_test_dockerfile new file mode 100644 index 
0000000000000000000000000000000000000000..8c3be02f60c0b7382a21f639da0b764bee703e36 --- /dev/null +++ b/dockerfile/qemu_test_dockerfile @@ -0,0 +1,128 @@ +FROM openeuler/openeuler:22.03-lts + +# MAINTAINER +MAINTAINER harvey + +# yum install +RUN set -eux; \ + yum -y install iputils git java tar cmake gperf sqlite-devel \ + libffi-devel xz-devel zlib zlib-devel openssl-devel bzip2-devel \ + ncurses-devel readline-devel libpcap-devel parted autoconf-archive \ + chrpath gcc-c++ patch rpm-build flex autoconf automake m4 bison bc \ + libtool gettext-devel createrepo_c git net-tools wget sudo hostname \ + rpcgen texinfo python meson dosfstools mtools libmpc-devel gmp-devel \ + ninja-build numactl-devel make glib2 glib2-devel java-latest-openjdk.x86_64 \ + expect psmisc bridge-utils + +# gcc install +ARG tools_dir=/usr1/tools +ARG gcc_install_dir=/usr1/openeuler/gcc + +# change dir to workdir and star install +WORKDIR ${tools_dir} +RUN wget https://repo.openeuler.org/openEuler-22.03-LTS/EPOL/main/x86_64/Packages/gcc-cross-1.0-0.oe2203.x86_64.rpm -P ${tools_dir} +RUN rpm2cpio ${tools_dir}/gcc-cross-1.0-0.oe2203.x86_64.rpm | cpio -id + +# change dir to workdir and unpack +WORKDIR ${gcc_install_dir} +RUN tar -xf ${tools_dir}/tmp/openeuler_gcc_arm32le.tar.gz +RUN find ./openeuler_gcc_arm32le -type d | xargs chmod go+x +RUN chmod go+r ./openeuler_gcc_arm32le -R +RUN chmod -R 755 /usr1/openeuler/gcc/openeuler_gcc_arm32le/bin + +RUN tar -xf ${tools_dir}/tmp/openeuler_gcc_arm64le.tar.gz +RUN find ./openeuler_gcc_arm64le -type d | xargs chmod go+x +RUN chmod go+r ./openeuler_gcc_arm64le -R +RUN chmod -R 755 /usr1/openeuler/gcc/openeuler_gcc_arm64le/bin +RUN find ./ | xargs chmod go+x + + +# python install +ARG python_install_dir=/opt/buildtools/python-3.9.2 + +# change dir to workdir and start install +WORKDIR /usr1/tools +RUN wget https://www.python.org/ftp/python/3.9.2/Python-3.9.2.tgz +RUN tar -xf Python-3.9.2.tgz +RUN rm -rf /usr/local/bin/python3 /usr/local/bin/python +WORKDIR /usr1/tools/Python-3.9.2 +RUN ./configure --prefix=/opt/buildtools/python-3.9.2 --enable-loadable-sqlite-extensions +RUN make -j 8 && make install +RUN ln -s ${python_install_dir}/bin/python3 /usr/local/bin/python3 +RUN ln -s ${python_install_dir}/bin/python3 /usr/local/bin/python +RUN export PYTHONPATH=${python_install_dir}/lib64/python3.9/lib-dynload/ +RUN export PYTHONPATH="${python_install_dir}/lib/python3.9/site-packages/:${python_install_dir}:${python_install_dir}/lib64/python3.9/lib-dynload/" + +# ninja install +ARG ninja_install_dir="/opt/buildtools/ninja-1.10.1" + +# change dir to workdir and start install +WORKDIR /usr1/tools +RUN wget https://distfiles.macports.org/ninja/ninja-1.10.1.tar.gz +RUN tar -xf ninja-1.10.1.tar.gz +WORKDIR /usr1/tools/ninja-1.10.1 +RUN sed -ie '1c#!/usr/bin/env python3' *.py +RUN ./configure.py --bootstrap +RUN mkdir -p ${ninja_install_dir}/bin +RUN install -m 0755 ./ninja ${ninja_install_dir}/bin + +# change access permission +WORKDIR /opt/buildtools +RUN find ./ -type d | xargs chmod 755 + +# clean install package +WORKDIR /usr1 +RUN rm -rf tools + +# install qemu +RUN wget https://download.qemu.org/qemu-7.0.0.tar.xz +RUN tar xvJf qemu-7.0.0.tar.xz +WORKDIR /usr1/qemu-7.0.0 +RUN yum -y install pixman-devel +RUN ./configure +RUN make +RUN make install + +RUN ln -s /opt/buildtools/python-3.9.2/bin/pip3 /usr/bin/pip +RUN ln -s /opt/buildtools/python-3.9.2/bin/pip3 /usr/bin/pip3 +RUN python -m pip install --upgrade pip + +RUN pip install six paramiko==2.7.2 + 
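+# assumption: six/paramiko back the qemu test scripts, which drive the guest over ssh (hence the paramiko pin)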
+###################################################install jenkins########################################################## + +ARG VERSION=4.3 +ARG user=jenkins +ARG group=jenkins +ARG uid=1000 +ARG gid=1000 +ARG AGENT_WORKDIR=/home/${user}/agent + +RUN curl --create-dirs -fsSLo /usr/share/jenkins/agent.jar https://repo.jenkins-ci.org/public/org/jenkins-ci/main/remoting/${VERSION}/remoting-${VERSION}.jar \ + && chmod 755 /usr/share/jenkins \ + && chmod 644 /usr/share/jenkins/agent.jar \ + && ln -sf /usr/share/jenkins/agent.jar /usr/share/jenkins/slave.jar + +RUN curl --create-dirs -fsSLo /usr/local/bin/jenkins-agent http://121.36.53.23/AdoptOpenJDK/jenkins-agent +#COPY jenkins-agent /usr/local/bin/jenkins-agent + +#RUN chmod a+rx /usr/local/openjdk-11 \ +RUN chmod a+rx /usr/local/bin/jenkins-agent \ + && ln -s /usr/local/bin/jenkins-agent /usr/local/bin/jenkins-slave + +RUN groupadd -g ${gid} ${group} +RUN useradd -c "Jenkins user" -d /home/${user} -u ${uid} -g ${gid} -m ${user} +RUN echo "${user} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers + + +USER ${user} +ENV AGENT_WORKDIR=${AGENT_WORKDIR} +RUN mkdir /home/${user}/.jenkins && mkdir -p ${AGENT_WORKDIR} + +VOLUME /home/${user}/.jenkins +VOLUME ${AGENT_WORKDIR} +WORKDIR ${AGENT_WORKDIR} + +RUN sudo sed -i 's/TMOUT=300/TMOUT=/g' /etc/bashrc + +ENTRYPOINT ["jenkins-agent"] diff --git a/dsoftbus/build_tools/README b/dsoftbus/build_tools/README new file mode 100644 index 0000000000000000000000000000000000000000..e0ef1aad5e904d3e4cdb3437c488329d498cf989 --- /dev/null +++ b/dsoftbus/build_tools/README @@ -0,0 +1,9 @@ +[gn] +from: +https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz +https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz.sha256 + +[ninja] +from: +https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.10.1/linux/ninja-linux-x86-1.10.1.tar.gz +https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.10.1/linux/ninja-linux-x86-1.10.1.tar.gz.sha256 diff --git a/dsoftbus/build_tools/gn-linux-x86-1717.tar.gz b/dsoftbus/build_tools/gn-linux-x86-1717.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..5eab38ee24ce22ac01ed7bcb36e4e689f8863364 Binary files /dev/null and b/dsoftbus/build_tools/gn-linux-x86-1717.tar.gz differ diff --git a/dsoftbus/build_tools/gn-linux-x86-1717.tar.gz.sha256 b/dsoftbus/build_tools/gn-linux-x86-1717.tar.gz.sha256 new file mode 100644 index 0000000000000000000000000000000000000000..5d6fbf55f787764b241064955f1ac6b6eff13143 --- /dev/null +++ b/dsoftbus/build_tools/gn-linux-x86-1717.tar.gz.sha256 @@ -0,0 +1 @@ +37111f49ebceee9f78ebe7b1d68079a5ea5b6b13d5411fada06bffc368783d01 diff --git a/dsoftbus/hichain_sample/hichain_main.c b/dsoftbus/hichain_sample/hichain_main.c new file mode 100644 index 0000000000000000000000000000000000000000..acc0a127affd7451115b6f6f403ac592bd7f8aa4 --- /dev/null +++ b/dsoftbus/hichain_sample/hichain_main.c @@ -0,0 +1,639 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define APP_ID "hichain_test" +#define SESSION_NAME "com.huawei.devicegroupmanage.test" +#define DEFAULT_CAPABILITY "osdCapability" +#define PUBLISH_ID 100 + +#define DEFAULT_GROUP_NAME "dsoftbus" +#define DEFAULT_PIN_CODE "123456" +#define MAX_UDID_LEN 65 +#define MAX_GROUP_LEN 65 +#define ERR_RET -1 + +#define FIELD_ETH_IP "ETH_IP" +#define FIELD_ETH_PORT "ETH_PORT" +#define FIELD_WLAN_IP "WIFI_IP" +#define FIELD_WLAN_PORT "WIFI_PORT" + +enum { + DEVICE_DISCOVERY = 0, + DEVICE_JOINING, + 
DEVICE_ONLINE, +}DeviceStatus; + +char *g_deviceStatus[] = { + "discovery", + "joining", + "online", +}; + +typedef struct DeviceList { + struct DeviceList *next; + DeviceInfo device; + int status; + int64_t requestId; +} DeviceList; +DeviceList *g_deviceListHead = NULL; + +static const DeviceGroupManager *g_hichainGmInstance = NULL; +static char g_udid[MAX_UDID_LEN]; +static char g_groupId[MAX_GROUP_LEN]; +static int64_t g_requestId = 1; + +static const char *GetStringFromJson(const cJSON *obj, const char *key) +{ + cJSON *item; + + if (obj == NULL || key == NULL) + return NULL; + + item = cJSON_GetObjectItemCaseSensitive(obj, key); + if (item != NULL && cJSON_IsString(item)) { + return cJSON_GetStringValue(item); + } else { + int len = cJSON_GetArraySize(obj); + for (int i = 0; i < len; i++) { + item = cJSON_GetArrayItem(obj, i); + if (cJSON_IsObject(item)) { + const char *value = GetStringFromJson(item, key); + if (value != NULL) + return value; + } + } + } + return NULL; +} + +static int HichainSaveGroupID(const char *param) +{ + cJSON *msg = cJSON_Parse(param); + const char *value = NULL; + + if (msg == NULL) { + printf("HichainSaveGroupID: cJSON_Parse fail\n"); + return ERR_RET; + } + + value = GetStringFromJson(msg, FIELD_GROUP_ID); + if (value == NULL) { + printf("HichainSaveGroupID:GetStringFromJson fail\n"); + cJSON_Delete(msg); + return ERR_RET; + } + + memcpy_s(g_groupId, MAX_GROUP_LEN, value, strlen(value)); + printf("HichainSaveGroupID:groupID=%s\n", g_groupId); + + cJSON_Delete(msg); + return 0; +} + +static void HiChainGmOnFinish(int64_t requestId, int operationCode, const char *returnData) +{ + if (operationCode == GROUP_CREATE && returnData != NULL) { + printf("create new group finish:requestId=%lld, returnData=%s\n", requestId, returnData); + HichainSaveGroupID(returnData); + } else if (operationCode == MEMBER_JOIN) { + DeviceList *node = g_deviceListHead; + + printf("member join finish:requestId=%lld, returnData=%s\n", requestId, returnData); + while (node) { + if (node->requestId != requestId) { + node = node->next; + continue; + } + node->status = DEVICE_ONLINE; + break; + } + } else { + printf("CB:requestId=%lld, operationCode=%d, returnData=%s\n", requestId, operationCode, returnData); + } +} + +static void HiChainGmOnError(int64_t requestId, int operationCode, int errorCode, const char *errorReturn) +{ + DeviceList *node = g_deviceListHead; + + printf("CB:requestId=%lld, operationCode=%d, errorCode=%d, errorReturn=%s\n", requestId, operationCode, errorCode, errorReturn); + while (node) { + if (node->requestId != requestId) { + node = node->next; + continue; + } + node->status = DEVICE_DISCOVERY; + break; + } +} + +static char *HiChainGmOnRuest(int64_t requestId, int operationCode, const char *reqParams) +{ + cJSON *msg = cJSON_CreateObject(); + char *param = NULL; + + printf("CB:requestId=%lld, operationCode=%d, reqParams=%s", requestId, operationCode, reqParams); + + if (operationCode != MEMBER_JOIN) { + return NULL; + } + + if (msg == NULL) { + printf("HiChainGmOnRuest: cJSON_CreateObject fail\n"); + } + + if (cJSON_AddNumberToObject(msg, FIELD_CONFIRMATION, REQUEST_ACCEPTED) == NULL || + cJSON_AddStringToObject(msg, FIELD_PIN_CODE, DEFAULT_PIN_CODE) == NULL || + cJSON_AddStringToObject(msg, FIELD_DEVICE_ID, g_udid) == NULL) { + printf("HiChainGmOnRuest: cJSON_AddToObject fail\n"); + cJSON_Delete(msg); + return NULL; + } + + param = cJSON_PrintUnformatted(msg); + cJSON_Delete(msg); + return param; +} + +static const DeviceAuthCallback g_groupManagerCallback = { + 
.onRequest = HiChainGmOnRuest, + .onError = HiChainGmOnError, + .onFinish = HiChainGmOnFinish, +}; + +static int HichainGmRegCallback(void) +{ + return g_hichainGmInstance->regCallback(APP_ID, &g_groupManagerCallback); +} + +static void HichainGmUnRegCallback(void) +{ + g_hichainGmInstance->unRegCallback(APP_ID); +} + +static int HichainGmGetGroupInfo(char **groupVec, uint32_t *num) +{ + cJSON *msg = cJSON_CreateObject(); + char *param = NULL; + int ret = ERR_RET; + + if (msg == NULL) { + printf("HichainGmGetGroupInfo: cJSON_CreateObject fail\n"); + return ret; + } + + if (cJSON_AddNumberToObject(msg, FIELD_GROUP_TYPE, PEER_TO_PEER_GROUP) == NULL || + cJSON_AddStringToObject(msg, FIELD_GROUP_NAME, DEFAULT_GROUP_NAME) == NULL || + cJSON_AddNumberToObject(msg, FIELD_GROUP_VISIBILITY, GROUP_VISIBILITY_PUBLIC) == NULL) { + printf("HichainGmGetGroupInfo: cJSON_AddToObject fail\n"); + goto err_cJSON_Delete; + } + param = cJSON_PrintUnformatted(msg); + if (param == NULL) { + printf("HichainGmGetGroupInfo: cJSON_PrintUnformatted fail\n"); + goto err_cJSON_Delete; + } + + ret = g_hichainGmInstance->getGroupInfo(ANY_OS_ACCOUNT, APP_ID, param, groupVec, num); + if (ret != 0) { + printf("getGroupInfo fail:%d", ret); + goto err_getGroupInfo; + } + +err_getGroupInfo: + cJSON_free(param); +err_cJSON_Delete: + cJSON_Delete(msg); + return ret; +} + +static void HichainGmDestroyGroupInfo(char **groupVec) +{ + g_hichainGmInstance->destroyInfo(groupVec); +} + +static int HichainGmCreatGroup(void) +{ + cJSON *msg = cJSON_CreateObject(); + char *param = NULL; + int ret = ERR_RET; + + if (msg == NULL) + return ret; + + if (cJSON_AddNumberToObject(msg, FIELD_GROUP_TYPE, PEER_TO_PEER_GROUP) == NULL || + cJSON_AddStringToObject(msg, FIELD_DEVICE_ID, g_udid) == NULL || + cJSON_AddStringToObject(msg, FIELD_GROUP_NAME, DEFAULT_GROUP_NAME) == NULL || + cJSON_AddNumberToObject(msg, FIELD_USER_TYPE, 0) == NULL || + cJSON_AddNumberToObject(msg, FIELD_GROUP_VISIBILITY, GROUP_VISIBILITY_PUBLIC) == NULL || + cJSON_AddNumberToObject(msg, FIELD_EXPIRE_TIME, EXPIRE_TIME_MAX) == NULL) { + printf("HichainGmCreatGroup: cJSON_AddToObject fail\n"); + cJSON_Delete(msg); + return ret; + } + param = cJSON_PrintUnformatted(msg); + if (param == NULL) { + printf("HichainGmCreatGroup: cJSON_PrintUnformatted fail\n"); + cJSON_Delete(msg); + return ret; + } + + ret = g_hichainGmInstance->createGroup(ANY_OS_ACCOUNT, g_requestId++, APP_ID, param); + + cJSON_free(param); + cJSON_Delete(msg); + return ret; +} + +static bool HichainIsDeviceInGroup(const char *groupId, const char *devId) +{ + return g_hichainGmInstance->isDeviceInGroup(ANY_OS_ACCOUNT, APP_ID, groupId, devId); +} + +static int HichainGmAddMemberToGroup(DeviceInfo *device, const char *groupId) +{ + cJSON *msg = cJSON_CreateObject(); + cJSON *addr = NULL; + char *param = NULL; + int ret = ERR_RET; + + if (msg == NULL) { + printf("HichainGmAddMemberToGroup: cJSON_CreateObject1 fail\n"); + return ret; + } + + addr = cJSON_CreateObject(); + if (addr == NULL) { + printf("HichainGmAddMemberToGroup: cJSON_CreateObject2 fail\n"); + goto err_cJSON_CreateObject; + } + + for (unsigned int i = 0; i < device->addrNum; i++) { + if (device->addr[i].type == CONNECTION_ADDR_ETH) { + if (cJSON_AddStringToObject(addr, FIELD_ETH_IP, device->addr[i].info.ip.ip) == NULL || + cJSON_AddNumberToObject(addr, FIELD_ETH_PORT, device->addr[i].info.ip.port) == NULL) { + printf("HichainGmAddMemberToGroup: cJSON_AddToObject1 fail\n"); + goto err_cJSON_AddToObject; + } + } else if (device->addr[i].type == 
CONNECTION_ADDR_WLAN) { + if (cJSON_AddStringToObject(addr, FIELD_WLAN_IP, device->addr[i].info.ip.ip) == NULL || + cJSON_AddNumberToObject(addr, FIELD_WLAN_PORT, device->addr[i].info.ip.port) == NULL) { + printf("HichainGmAddMemberToGroup: cJSON_AddToObject2 fail\n"); + goto err_cJSON_AddToObject; + } + } else { + printf("unsupport connection type:%d\n", device->addr[i].type); + goto err_cJSON_AddToObject; + } + } + + param = cJSON_PrintUnformatted(addr); + if (param == NULL) { + printf("HichainGmAddMemberToGroup: cJSON_PrintUnformatted1 fail\n"); + goto err_cJSON_AddToObject; + } + + if (cJSON_AddStringToObject(msg, FIELD_GROUP_ID, groupId) == NULL || + cJSON_AddNumberToObject(msg, FIELD_GROUP_TYPE, PEER_TO_PEER_GROUP) == NULL || + cJSON_AddStringToObject(msg, FIELD_PIN_CODE, DEFAULT_PIN_CODE) == NULL || + cJSON_AddStringToObject(msg, FIELD_DEVICE_ID, g_udid) == NULL || + cJSON_AddStringToObject(msg, FIELD_GROUP_NAME, DEFAULT_GROUP_NAME) == NULL || + cJSON_AddBoolToObject(msg, FIELD_IS_ADMIN, false) == NULL || + cJSON_AddStringToObject(msg, FIELD_CONNECT_PARAMS, param) == NULL) { + printf("HichainGmAddMemberToGroup: cJSON_AddToObject4 fail\n"); + goto err_cJSON_AddToObject1; + } + + cJSON_free(param); + param = cJSON_PrintUnformatted(msg); + if (param == NULL) { + printf("HichainGmAddMemberToGroup: cJSON_PrintUnformatted fail\n"); + goto err_cJSON_CreateObject; + } + + ret = g_hichainGmInstance->addMemberToGroup(ANY_OS_ACCOUNT, g_requestId++, APP_ID, param); + if (ret != 0) { + printf("addMemberToGroup fail:%d\n", ret); + } + +err_cJSON_AddToObject1: + cJSON_free(param); +err_cJSON_AddToObject: + cJSON_Delete(addr); +err_cJSON_CreateObject: + cJSON_Delete(msg); + return ret; +} + +static int HichainInit(void) +{ + char *groupVec = NULL; + uint32_t num; + int ret; + + ret = InitDeviceAuthService(); + if (ret != 0) { + printf("InitDeviceAuthService fail:%d\n", ret); + return ret; + } + + g_hichainGmInstance = GetGmInstance(); + if (g_hichainGmInstance == NULL) { + printf("GetGmInstance fail\n"); + ret = ERR_RET; + goto err_GetGmInstance; + } + + ret = HichainGmRegCallback(); + if (ret != 0) { + printf("HichainGmregCallback fail.:%d\n", ret); + goto err_HichainGmRegCallback; + } + + ret = HichainGmGetGroupInfo(&groupVec, &num); + if (ret != 0) { + printf("HichainGmGetGroupInfo fail:%d\n", ret); + goto err_HichainGmGetGroupInfo; + } + + if (num == 0) { + ret = HichainGmCreatGroup(); + if (ret) { + printf("HichainGmCreatGroup fail:%d\n", ret); + goto err_HichainGmCreatGroup; + } + } else { + printf("HichainGmGetGroupInfo:num=%u\n", num); + HichainSaveGroupID(groupVec); + HichainGmDestroyGroupInfo(&groupVec); + } + + return 0; + +err_HichainGmCreatGroup: +err_HichainGmGetGroupInfo: + HichainGmUnRegCallback(); +err_HichainGmRegCallback: +err_GetGmInstance: + DestroyDeviceAuthService(); + return ret; +} + +static void CheckDeviceStatus(void) +{ + DeviceList *node = g_deviceListHead; + char *groupVec = NULL; + uint32_t num; + int ret; + + ret = HichainGmGetGroupInfo(&groupVec, &num); + if (ret != 0 || num == 0) { + printf("HichainGmGetGroupInfo fail\n"); + return; + } + + ret = HichainSaveGroupID(groupVec); + if (ret != 0) + goto err_HichainSaveGroupID; + + while (node) { + if (HichainIsDeviceInGroup(g_groupId, node->device.devId)) { + node->status = DEVICE_ONLINE; + } + node = node->next; + } + +err_HichainSaveGroupID: + HichainGmDestroyGroupInfo(&groupVec); +} + +static bool CheckDeviceExist(const DeviceInfo *device) +{ + DeviceList *node = g_deviceListHead; + + while (node) { + if 
(strcmp(device->devId, node->device.devId) == 0) { + return true; + } + node = node->next; + } + return false; +} + +static void SaveDeviceInfo(const DeviceInfo *device) +{ + DeviceList *node = malloc(sizeof(DeviceList)); + + if (node == NULL) { + printf("SaveDeviceInfo: malloc fail\n"); + return; + } + + node->device = *device; + node->requestId = ERR_RET; + node->status = DEVICE_DISCOVERY; + if (g_deviceListHead == NULL) { + node->next = NULL; + } else { + node->next = g_deviceListHead; + } + g_deviceListHead = node; +} + +static DeviceList *GetDeviceInfo(int idx) +{ + DeviceList *node = g_deviceListHead; + while (node) { + if (--idx == 0) { + return node; + } + node = node->next; + } + return NULL; +} + +static void FreeDeviceInfo() +{ + while (g_deviceListHead) { + DeviceList *node = g_deviceListHead->next; + free(g_deviceListHead); + g_deviceListHead = node; + } +} + +static void ListDevice(void) +{ + DeviceList *node = g_deviceListHead; + int input, num = 0; + + if (node == NULL) { + printf("Get no device!\n"); + return; + } + + CheckDeviceStatus(); + while (node) { + printf("\n%d: devName=%s\n", ++num, node->device.devName); + printf("\tdevId=%s\n", node->device.devId); + printf("\tstatus=%s\n", g_deviceStatus[node->status]); + node = node->next; + } + + printf("Input num to auth:"); + scanf_s("%d", &input); + if (input <= 0 || input >num) { + printf("error input num\n"); + return; + } + + node = GetDeviceInfo(input); + if (node == NULL) { + printf("GetDeviceInfo fail\n"); + return; + } + + if (node->status == DEVICE_DISCOVERY) { + node->requestId = g_requestId; + node->status = DEVICE_JOINING; + int ret = HichainGmAddMemberToGroup(&node->device, g_groupId); + if (ret) { + printf("HichainGmAddMemberToGroup fail:%d\n", ret); + node->requestId = ERR_RET; + node->status = DEVICE_DISCOVERY; + return; + } + } +} + +static void PublishSuccess(int publishId) +{ + printf("CB: publish %d done\n", publishId); +} + +static void PublishFailed(int publishId, PublishFailReason reason) +{ + printf("CB: publish %d failed, reason=%d\n", publishId, (int)reason); +} + +static int PublishServiceInterface(void) +{ + PublishInfo info = { + .publishId = PUBLISH_ID, + .mode = DISCOVER_MODE_PASSIVE, + .medium = COAP, + .freq = LOW, + .capability = DEFAULT_CAPABILITY, + .capabilityData = NULL, + .dataLen = 0, + }; + IPublishCallback cb = { + .OnPublishSuccess = PublishSuccess, + .OnPublishFail = PublishFailed, + }; + return PublishService(APP_ID, &info, &cb); +} + +static void DeviceFound(const DeviceInfo *device) +{ + printf("CB: devName=%s", device->devName); + + if (CheckDeviceExist(device)) { + printf("device:%s udid:%s is already in List\n", device->devName, device->devId); + return; + } + SaveDeviceInfo(device); +} + +static void DiscoverySuccess(int subscribeId) +{ + printf("CB: discover subscribeId=%d\n", subscribeId); +} + +static void DiscoveryFailed(int subscribeId, DiscoveryFailReason reason) +{ + printf("CB: discover subscribeId=%d fail, reason=%d\n", subscribeId, (int)reason); +} + +static int DiscoveryInterface() +{ + SubscribeInfo info = { + .subscribeId = PUBLISH_ID, + .mode = DISCOVER_MODE_ACTIVE, + .medium = COAP, + .freq = LOW, + .isSameAccount = false, + .isWakeRemote = false, + .capability = DEFAULT_CAPABILITY, + .capabilityData = NULL, + .dataLen = 0, + }; + IDiscoveryCallback cb = { + .OnDeviceFound = DeviceFound, + .OnDiscoverFailed = DiscoveryFailed, + .OnDiscoverySuccess = DiscoverySuccess, + }; + return StartDiscovery(APP_ID, &info, &cb); +} + +int main() +{ + int ret; + bool loop 
= true; + + ret = GetDevUdid(g_udid, MAX_UDID_LEN); + if (ret) { + printf("GetDevUdid fail:%d\n", ret); + return ret; + } + + ret = HichainInit(); + if (ret) { + printf("HichainInit fail\n"); + return ret; + } + + ret = PublishServiceInterface(); + if (ret) { + printf("PublishService fail, ret=%d\n", ret); + goto err_PublishServiceInterface; + } + + ret = DiscoveryInterface(); + if (ret != 0) { + printf("DiscoveryInterface fail\n"); + goto err_DiscoveryInterface; + } + + while (loop) { + printf("\nInput l to list device; Input s to stop:"); + while (true) { + char c = getchar(); + if (c == 'l') { + ListDevice(); + continue; + } else if (c == 's') { + loop = false; + break; + } else if (c == '\n') { + break; + } else { + continue; + } + } + } + + StopDiscovery(APP_ID, PUBLISH_ID); + FreeDeviceInfo(); +err_DiscoveryInterface: + UnPublishService(APP_ID, PUBLISH_ID); +err_PublishServiceInterface: + HichainGmUnRegCallback(); + return ret; + +} diff --git a/dsoftbus/softbus_sample/softbus_client_main.c b/dsoftbus/softbus_sample/softbus_client_main.c new file mode 100644 index 0000000000000000000000000000000000000000..cdbaf55f5b467225c0f44ff13359605c593c8311 --- /dev/null +++ b/dsoftbus/softbus_sample/softbus_client_main.c @@ -0,0 +1,301 @@ +#include +#include +#include +#include "securec.h" +#include "discovery_service.h" +#include "softbus_bus_center.h" +#include "session.h" + +#define PACKAGE_NAME "softbus_sample" +#define LOCAL_SESSION_NAME "session_test" +#define TARGET_SESSION_NAME "session_test" +#define DEFAULT_CAPABILITY "osdCapability" +#define DEFAULT_SESSION_GROUP "group_test" +#define DEFAULT_PUBLISH_ID 123 + +static int g_sessionId; + +static void PublishSuccess(int publishId) +{ + printf("CB: publish %d done\n", publishId); +} + +static void PublishFailed(int publishId, PublishFailReason reason) +{ + printf("CB: publish %d failed, reason=%d\n", publishId, (int)reason); +} + +static int PublishServiceInterface() +{ + PublishInfo info = { + .publishId = DEFAULT_PUBLISH_ID, + .mode = DISCOVER_MODE_PASSIVE, + .medium = COAP, + .freq = LOW, + .capability = DEFAULT_CAPABILITY, + .capabilityData = NULL, + .dataLen = 0, + }; + IPublishCallback cb = { + .OnPublishSuccess = PublishSuccess, + .OnPublishFail = PublishFailed, + }; + return PublishService(PACKAGE_NAME, &info, &cb); +} + +static void UnPublishServiceInterface(void) +{ + int ret = UnPublishService(PACKAGE_NAME, DEFAULT_PUBLISH_ID); + if (ret != 0) { + printf("UnPublishService fail:%d\n", ret); + } +} + +static void DeviceFound(const DeviceInfo *device) +{ + unsigned int i; + printf("CB: Device has found\n"); + printf("\tdevId=%s\n", device->devId); + printf("\tdevName=%s\n", device->devName); + printf("\tdevType=%d\n", device->devType); + printf("\taddrNum=%d\n", device->addrNum); + for (i = 0; i < device->addrNum; i++) { + printf("\t\taddr%d:type=%d,", i + 1, device->addr[i].type); + switch (device->addr[i].type) { + case CONNECTION_ADDR_WLAN: + case CONNECTION_ADDR_ETH: + printf("ip=%s,port=%d,", device->addr[i].info.ip.ip, device->addr[i].info.ip.port); + break; + default: + break; + } + printf("peerUid=%s\n", device->addr[i].peerUid); + } + printf("\tcapabilityBitmapNum=%d\n", device->capabilityBitmapNum); + for (i = 0; i < device->addrNum; i++) { + printf("\t\tcapabilityBitmap[%d]=0x%x\n", i + 1, device->capabilityBitmap[i]); + } + printf("\tcustData=%s\n", device->custData); +} + +static void DiscoverySuccess(int subscribeId) +{ + printf("CB: discover subscribeId=%d\n", subscribeId); +} + +static void DiscoveryFailed(int 
subscribeId, DiscoveryFailReason reason) +{ + printf("CB: discover subscribeId=%d failed, reason=%d\n", subscribeId, (int)reason); +} + +static int DiscoveryInterface(void) +{ + SubscribeInfo info = { + .subscribeId = DEFAULT_PUBLISH_ID, + .mode = DISCOVER_MODE_ACTIVE, + .medium = COAP, + .freq = LOW, + .isSameAccount = false, + .isWakeRemote = false, + .capability = DEFAULT_CAPABILITY, + .capabilityData = NULL, + .dataLen = 0, + }; + IDiscoveryCallback cb = { + .OnDeviceFound = DeviceFound, + .OnDiscoverFailed = DiscoveryFailed, + .OnDiscoverySuccess = DiscoverySuccess, + }; + return StartDiscovery(PACKAGE_NAME, &info, &cb); +} + +static void StopDiscoveryInterface(void) +{ + int ret = StopDiscovery(PACKAGE_NAME, DEFAULT_PUBLISH_ID); + if (ret) { + printf("StopDiscovery fail:%d\n", ret); + } +} + +static int SessionOpened(int sessionId, int result) +{ + printf("CB: session %d open fail:%d\n", sessionId, result); + if (result == 0) { + g_sessionId = sessionId; + } + + return result; +} + +static void SessionClosed(int sessionId) +{ + printf("CB: session %d closed\n", sessionId); +} + +static void ByteRecived(int sessionId, const void *data, unsigned int dataLen) +{ + printf("CB: session %d received %u bytes data=%s\n", sessionId, dataLen, (const char *)data); +} + +static void MessageReceived(int sessionId, const void *data, unsigned int dataLen) +{ + printf("CB: session %d received %u bytes message=%s\n", sessionId, dataLen, (const char *)data); +} + +static int CreateSessionServerInterface(void) +{ + const ISessionListener sessionCB = { + .OnSessionOpened = SessionOpened, + .OnSessionClosed = SessionClosed, + .OnBytesReceived = ByteRecived, + .OnMessageReceived = MessageReceived, + }; + + return CreateSessionServer(PACKAGE_NAME, LOCAL_SESSION_NAME, &sessionCB); +} + +static void RemoveSessionServerInterface(void) +{ + int ret = RemoveSessionServer(PACKAGE_NAME, LOCAL_SESSION_NAME); + if (ret) { + printf("RemoveSessionServer fail:%d\n", ret); + } +} + +static int OpenSessionInterface(const char *peerNetworkId) +{ + SessionAttribute attr = { + .dataType = TYPE_BYTES, + .linkTypeNum = 1, + .linkType[0] = LINK_TYPE_WIFI_WLAN_2G, + .attr = {RAW_STREAM}, + }; + + return OpenSession(LOCAL_SESSION_NAME, TARGET_SESSION_NAME, peerNetworkId, DEFAULT_SESSION_GROUP, &attr); +} + +static void CloseSessionInterface(int sessionId) +{ + CloseSession(sessionId); +} + +static int GetAllNodeDeviceInfoInterface(NodeBasicInfo **dev) +{ + int ret, num; + + ret = GetAllNodeDeviceInfo(PACKAGE_NAME, dev, &num); + if (ret) { + printf("GetAllNodeDeviceInfo fail:%d\n", ret); + return -1; + } + + printf("return %d Node\n", num); + return num; +} + +static void FreeNodeInfoInterface(NodeBasicInfo *dev) +{ + FreeNodeInfo(dev); +} + +static void commnunicate(void) +{ + NodeBasicInfo *dev = NULL; + char cData[] = "hello world test"; + int dev_num, sessionId, input, ret; + int timeout = 5; + + dev_num = GetAllNodeDeviceInfoInterface(&dev); + if (dev_num <= 0) { + return; + } + + for (int i = 0; i < dev_num; i++) { + char devId[UDID_BUF_LEN]; + printf("deviceName=%s\n", i + 1, dev[i].deviceName); + printf("\tnetworkId=%s\n", dev[i].networkId); + if (GetNodeKeyInfo(PACKAGE_NAME, dev[i].networkId, NODE_KEY_UDID, (uint8_t *)devId, UDID_BUF_LEN) == 0) { + printf("\tdevId=%s\n", devId); + } + printf("\tType=%d\n", dev[i].deviceTypeId); + } + + printf("\nInput Node num to commnunication:"); + scanf_s("%d", &input); + if (input <= 0 || input > dev_num) { + printf("error input num\n"); + goto err_input; + } + + g_sessionId = -1; + 
sessionId = OpenSessionInterface(dev[input - 1].networkId); + if (sessionId < 0) { + printf("OpenSessionInterface fail, ret=%d\n", sessionId); + goto err_OpenSessionInterface; + } + + while (timeout) { + if (g_sessionId == sessionId) { + ret = SendBytes(sessionId, cData, strlen(cData) + 1); + if (ret) { + printf("SendBytes fail:%d\n", ret); + } + break; + } + timeout--; + sleep(1); + } + + CloseSessionInterface(sessionId); +err_OpenSessionInterface: +err_input: + FreeNodeInfoInterface(dev); +} + +int main(int argc, char **argv) +{ + bool loop = true; + int ret; + + ret = CreateSessionServerInterface(); + if (ret) { + printf("CreateSessionServer fail, ret=%d\n", ret); + return ret; + } + + ret = PublishServiceInterface(); + if (ret) { + printf("PublishService fail, ret=%d\n", ret); + goto err_PublishServiceInterface; + } + + ret = DiscoveryInterface(); + if (ret) { + printf("DiscoveryInterface fail, ret=%d\n", ret); + goto err_DiscoveryInterface; + } + + while (loop) { + printf("\nInput c to commnuication, Input s to stop:"); + char op = getchar(); + switch(op) { + case 'c': + commnunicate(); + continue; + case 's': + loop = false; + break; + case '\n': + break; + default: + continue; + } + } + + StopDiscoveryInterface(); +err_DiscoveryInterface: + UnPublishServiceInterface(); +err_PublishServiceInterface: + RemoveSessionServerInterface(); + return 0; +} diff --git a/dsoftbus/softbus_trans_permission.json b/dsoftbus/softbus_trans_permission.json new file mode 100644 index 0000000000000000000000000000000000000000..8a882a93f3a201f6269f92d18a4c75cbd6ce80ae --- /dev/null +++ b/dsoftbus/softbus_trans_permission.json @@ -0,0 +1,171 @@ +[ + { + "SESSION_NAME": "DBinderService", + "DEVID": "NETWORKID", + "APP_INFO": [ + { + "TYPE": "native_app", + "UID": "1000", + "PKG_NAME": "DBinderService", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "DBinder.*", + "REGEXP": "true", + "DEVID": "NETWORKID", + "SEC_LEVEL": "public", + "APP_INFO": [ + { + "TYPE": "granted_app", + "PKG_NAME": "DBinderBus", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "DistributedFileService.*", + "REGEXP": "true", + "DEVID": "UUID", + "APP_INFO": [ + { + "TYPE": "native_app", + "UID": "1000", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "distributeddata-default", + "REGEXP": "true", + "DEVID": "UUID", + "SEC_LEVEL": "public", + "APP_INFO": [ + { + "TYPE": "normal_app", + "PKG_NAME": "ohos.distributeddata", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "objectstoreDB-*", + "REGEXP": "true", + "DEVID": "UDID", + "SEC_LEVEL": "public", + "APP_INFO": [ + { + "TYPE": "normal_app", + "PKG_NAME": "ohos.objectstore", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "ohos.distributedschedule.dms.*", + "REGEXP": "true", + "DEVID": "UUID", + "APP_INFO": [ + { + "PKG_NAME": "dms", + "TYPE": "system_app", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "com.huawei.dmsdp.*", + "REGEXP": "true", + "DEVID": "UDID", + "APP_INFO": [ + { + "TYPE": "system_app", + "PKG_NAME": "com.huawei.dmsdp", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "com.huawei.devicegroupmanage", + "DEVID": "UDID", + "APP_INFO": [ + { + "PKG_NAME": "com.huawei.devicegroupmanage", + "TYPE": "system_app", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "ohos.distributedhardware.devicemanager.*", + "REGEXP": "true", + "DEVID": "UUID", + "APP_INFO": [ + { + "PKG_NAME": "ohos.distributedhardware.devicemanager", + "TYPE": "system_app", + "ACTIONS": 
"create,open" + } + ] + }, + { + "SESSION_NAME": "ohos.dhardware.*", + "REGEXP": "true", + "DEVID": "NETWORKID", + "SEC_LEVEL": "public", + "APP_INFO": [ + { + "PKG_NAME": "ohos.dhardware", + "TYPE": "system_app", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "security.dpms_channel", + "DEVID": "NETWORKID", + "APP_INFO": [ + { + "TYPE": "system_app", + "PKG_NAME": "ohos.security.distributed_permission", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "device.security.level", + "DEVID": "UDID", + "APP_INFO": [ + { + "TYPE": "native_app", + "UID": "1000", + "PKG_NAME": "ohos.dslm", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "ohos.security.atm_channel", + "DEVID": "NETWORKID", + "APP_INFO": [ + { + "TYPE": "system_app", + "PKG_NAME": "ohos.security.distributed_access_token", + "ACTIONS": "create,open" + } + ] + }, + { + "SESSION_NAME": "session_test", + "DEVID": "UDID", + "APP_INFO": [ + { + "PKG_NAME": "softbus_sample", + "TYPE": "normal_app", + "ACTIONS": "create,open" + } + ] + } +] + diff --git a/dsoftbus/test/softbus_main.cpp b/dsoftbus/test/softbus_main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0e5a9c5c7eeeb26ac188fb9369975d36099eeda4 --- /dev/null +++ b/dsoftbus/test/softbus_main.cpp @@ -0,0 +1,358 @@ +#include +#include +#include +#include +#include + +#include "dsoftbus/discovery_service.h" +#include "dsoftbus/softbus_bus_center.h" +#include "dsoftbus/session.h" + +std::vector devList; +char receivedData[256]; + +bool hasPublish = false; +bool hasDiscovery = false; +const size_t LINE_WIDTH = 80; + +// #define ImReceive + +#define clientName "Client" + +void PrintMargin() +{ + for (size_t i = 0; i < LINE_WIDTH; ++i) { + printw("#"); + } + printw("\n"); +} + +void PrintTitle(const char *msg) +{ + size_t len = strlen(msg); + if (len + 2 <= LINE_WIDTH) { + size_t emptyLen = LINE_WIDTH - len - 2; + size_t preEmptyLen = emptyLen / 2; + size_t postEmptyLen = emptyLen - preEmptyLen; + printw("#"); + while (preEmptyLen--) { + printw(" "); + } + printw("%s", msg); + while (postEmptyLen--) { + printw(" "); + } + printw("#\n"); + } else { + printw("#%s#\n", msg); + } +} + +void PrintMessage(const char *msg) +{ + size_t len = strlen(msg); + if (len + 6 <= LINE_WIDTH) { + size_t emptyLen = LINE_WIDTH - len - 6; + printw("# "); + printw("%s", msg); + while (emptyLen--) { + printw(" "); + } + printw(" #\n"); + } else { + printw("#%s#\n", msg); + } +} + +void DisplayTitle() +{ + erase(); + PrintMargin(); + PrintTitle("openEuler"); + PrintTitle("SoftBus"); + PrintTitle(""); + PrintTitle(""); +} + +void DisplayMain() +{ + DisplayTitle(); + PrintTitle(""); + PrintTitle(""); + PrintTitle(""); + PrintMessage("Press key:"); + PrintMessage("[D]DiscoveryService [T]DataTransmission"); + PrintTitle(""); + PrintTitle(""); + PrintMargin(); +} + +void DisplayDevList() +{ + char msg[256]; + DisplayTitle(); + PrintTitle(""); + PrintTitle(""); + PrintMessage("Press key:"); + PrintMessage("[S]StopDiscovery"); + PrintMessage("Devices list:"); + for (size_t i = 0; i < devList.size(); ++i) + PrintMessage(devList[i]->devId); + PrintTitle(""); + PrintMargin(); +} + +void DisplayReceivedData() +{ + DisplayTitle(); + PrintTitle(""); + PrintTitle(""); + PrintMessage("Press key:"); + PrintMessage("[Esc]Back"); + PrintMessage("Received data:"); + PrintMessage(receivedData); + PrintTitle(""); + PrintTitle(""); + PrintMargin(); +} + +void PublishSuccess(int publishId) +{ + // printf("CB: publish %d done", publishId); +} + +void PublishFailed(int 
publishId, PublishFailReason reason)
+{
+    // printf("CB: publish %d failed, reason=%d\n", publishId, (int)reason);
+}
+
+void DeviceFound(const DeviceInfo *device)
+{
+    devList.push_back(device);
+    DisplayDevList();
+}
+
+void DiscoverySuccess(int subscribeId)
+{
+    // printf("CB: discover subscribeId=%d\n", subscribeId);
+}
+
+void DiscoveryFailed(int subscribeId, DiscoveryFailReason reason)
+{
+    // printf("CB: discover subscribeId=%d failed, reason=%d\n", subscribeId, (int)reason);
+}
+
+int SessionOpened(int sessionId, int result)
+{
+    // printf("CB: session %d open ret=%d\n", sessionId, result);
+    return 0;
+}
+
+void SessionClosed(int sessionId)
+{
+    // printf("CB: session %d closed\n", sessionId);
+}
+
+void ByteRecived(int sessionId, const void *data, unsigned int dataLen)
+{
+    memset(receivedData, 0, sizeof(receivedData));
+    if (dataLen >= sizeof(receivedData)) {
+        dataLen = sizeof(receivedData) - 1;    /* truncate so receivedData stays NUL-terminated */
+    }
+    memcpy(receivedData, data, dataLen);
+    DisplayReceivedData();
+    // printf("CB: session %d received %u bytes data=%s\n", sessionId, dataLen, (const char *)data);
+}
+
+void MessageReceived(int sessionId, const void *data, unsigned int dataLen)
+{
+    // printf("CB: session %d received %u bytes message=%s\n", sessionId, dataLen, (const char *)data);
+}
+
+unsigned char cData[] = "My Client Test";
+
+int PublishServiceInterface()
+{
+    PublishInfo info = {
+        .publishId = 123,
+        .mode = DISCOVER_MODE_ACTIVE,
+        .medium = COAP,
+        .freq = LOW,
+        .capability = "hicall",
+        .capabilityData = cData,
+        .dataLen = sizeof(cData),
+    };
+    IPublishCallback cb = {
+        .OnPublishSuccess = PublishSuccess,
+        .OnPublishFail = PublishFailed,
+    };
+    return PublishService(clientName, &info, &cb);
+}
+
+int DiscoveryInterface()
+{
+    SubscribeInfo info = {
+        .subscribeId = 123,
+        .mode = DISCOVER_MODE_ACTIVE,
+        .medium = COAP,
+        .freq = LOW,
+        .isSameAccount = false,
+        .isWakeRemote = false,
+        .capability = "hicall",
+        .capabilityData = cData,
+        .dataLen = sizeof(cData),
+    };
+    IDiscoveryCallback cb = {
+        .OnDeviceFound = DeviceFound,
+        .OnDiscoverFailed = DiscoveryFailed,
+        .OnDiscoverySuccess = DiscoverySuccess,
+    };
+    return StartDiscovery(clientName, &info, &cb);
+}
+
+int CreateSessionServerInterface(const char *SessionName)
+{
+    ISessionListener cb = {
+        .OnSessionOpened = SessionOpened,
+        .OnSessionClosed = SessionClosed,
+        .OnBytesReceived = ByteRecived,
+        .OnMessageReceived = MessageReceived,
+    };
+    return CreateSessionServer(clientName, SessionName, &cb);
+}
+
+int OpenSessionInterface(const char *SessionName, const char *peerName, const char *peerId)
+{
+    SessionAttribute attr = {
+        .dataType = TYPE_BYTES,
+        .linkTypeNum = 1,
+        .attr = {RAW_STREAM},
+    };
+    attr.linkType[0] = LINK_TYPE_WIFI_P2P;
+    return OpenSession(SessionName, peerName, peerId, "MyGroup", &attr);
+}
+
+void Discovering()
+{
+    devList.clear();
+    DiscoveryInterface();
+    while (true) {
+        DisplayDevList();
+        char op = getch();
+        if (op == 'S' || op == 's') {
+            break;
+        }
+    }
+}
+
+void SendData()
+{
+    NodeBasicInfo *dev;
+    int32_t dev_num;
+    GetAllNodeDeviceInfo(clientName, &dev, &dev_num);
+    CreateSessionServerInterface("SessionTest1");
+    char op;
+    int32_t selectId = 0;
+    char input[128];
+    while (true) {
+        DisplayTitle();
+        PrintTitle("");
+        PrintTitle("");
+        PrintMessage("Press key:");
+        PrintMessage("[U]Up [D]Down [S]Select");
+        PrintTitle("");
+        for (int32_t i = 0; i < dev_num; ++i) {
"*" : " "); + strcat(input, dev->networkId); + PrintMessage(input); + } + PrintTitle(""); + PrintMargin(); + // to do + op = getch(); + if (op == 'U' || op == 'u') { + --selectId; + if (selectId < 0) { + selectId = dev_num - 1; + } + } else if (op == 'D' || op == 'd') { + ++selectId; + if (selectId == dev_num) { + selectId = 0; + } + } else if (op == 'S' || op == 's') { + break; + } + } + size_t inputLen = 0; + if (dev_num) { + int sessionId = OpenSessionInterface("SessionTest1", "SessionTest2", dev[selectId].networkId); + while (sessionId >= 0) { + input[inputLen] = '\0'; + DisplayTitle(); + PrintTitle(""); + PrintTitle(""); + PrintMessage("Press key:"); + PrintMessage("[Enter]Send [Esc]Back"); + PrintMessage("Message:"); + PrintMessage(input); + PrintTitle(""); + PrintMargin(); + op = getch(); + if (op == 13) { + SendBytes(sessionId, input, strlen(input)); + inputLen = 0; + } else if (op == 8) { + if (inputLen) { + --inputLen; + } + } else if (op == 27) { + break; + } else { + input[inputLen] = op; + ++inputLen; + } + } + CloseSession(selectId); + } +} + +void ReceiveData() +{ + NodeBasicInfo *dev; + int32_t dev_num; + GetAllNodeDeviceInfo(clientName, &dev, &dev_num); + CreateSessionServerInterface("SessionTest2"); + while (true) { + DisplayReceivedData(); + char op = getch(); + if (op == 27) { + break; + } + } +} + +int main(int argc, char **argv) +{ + bool needContinue = true; + initscr(); + while (needContinue) { + DisplayMain(); + char op = getch(); + switch(op) { + case 'D': + case 'd': + Discovering(); + break; + case 'T': + case 't': +#ifdef ImReceive + ReceiveData(); +#else + SendData(); +#endif + case 27: + needContinue = false; + } + } + endwin(); + return 0; +} diff --git a/robot_painting/README.md b/robot_painting/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6c963b8979077f9107fec9ba9701989b2d8c4bec --- /dev/null +++ b/robot_painting/README.md @@ -0,0 +1,48 @@ +# yocto-embedded-tools + +#### 介绍 +The tools provided by sig-Yocto + +#### 软件架构 +软件架构说明 + + +#### 安装教程 +1. 全局安装conda,node和pnpm; +2. 进入robot_painting\robot-sketch-vue,执行pnpm i; +3. 进入robot_painting\qmupd_vs,conda安装environment.yaml; +4. 进入robot_painting\image-matting,conda安装requirements.txt; + +#### 使用说明 + +1. 前端启动:进入robot_painting\robot-sketch-vue,执行pnpm dev; +2. 后端启动:进入robot_painting\image-matting,执行 +``` +conda activate robot-sketch + +python3 app.py +``` + +3. 风格化:进入robot_painting\qmupd_vs,执行 +``` +conda activate vsketch +export FLASK_APP=main +flask run --host=0.0.0.0 +``` + +#### 参与贡献 + +1. Fork 本仓库 +2. 新建 Feat_xxx 分支 +3. 提交代码 +4. 新建 Pull Request + + +#### 特技 + +1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md +2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com) +3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目 +4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目 +5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help) +6. 
Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/) diff --git a/robot_painting/image-matting/.dockerignore b/robot_painting/image-matting/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..190f51b59b39731fe3345574e2aed3912a00b291 --- /dev/null +++ b/robot_painting/image-matting/.dockerignore @@ -0,0 +1,6 @@ +.github/workflows/build-image.yml +.git +.gitignore +.dockerignore +README.md +LICENSE diff --git a/robot_painting/image-matting/.gitignore b/robot_painting/image-matting/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..8b3ae03f28a2e2a917c710b39dee31c975cf5654 --- /dev/null +++ b/robot_painting/image-matting/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ +output +upload diff --git a/robot_painting/image-matting/Dockerfile b/robot_painting/image-matting/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..e41183c96385a43fa7d282d8736653ffaeea6921 --- /dev/null +++ b/robot_painting/image-matting/Dockerfile @@ -0,0 +1,20 @@ +FROM python:3.10 + +WORKDIR /app + +COPY . /app + +RUN pip install --no-cache-dir -r requirements.txt + +EXPOSE 8000 + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + libgl1-mesa-glx \ + tzdata && \ + ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ + dpkg-reconfigure -f noninteractive tzdata && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +CMD ["python", "app.py"] diff --git a/robot_painting/image-matting/LICENSE b/robot_painting/image-matting/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..101a3246aea49720f7589327e1a6bd7065acd19d --- /dev/null +++ b/robot_painting/image-matting/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Hmily + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/robot_painting/image-matting/README.md b/robot_painting/image-matting/README.md new file mode 100644 index 0000000000000000000000000000000000000000..589266007e9409f8a06ca8041e4d56c0f5b3723d --- /dev/null +++ b/robot_painting/image-matting/README.md @@ -0,0 +1,118 @@ +## Imgae matting + +Here are a few effects(omitting mask images): + +![image-1](https://github.com/ihmily/image-matting/blob/main/assets/image-1.png) + +![image-2](https://github.com/ihmily/image-matting/blob/main/assets/image-2.png) + +  + +## How to Run + +**Method 1: Run from Source Code** + +Firstly, you need to download the project code and install the required dependencies. + +``` +# Python 3.10 + +git clone https://github.com/ihmily/image-matting.git +cd image-matting +pip install -r requirements.txt +``` + +Next, use the following command to run the web interface. + +``` +python app.py +``` + +Finally, visit http://127.0.0.1:8000/. + +  + +**Method 2: Run with Docker** + +Simply run the following commands after entering the project folder. + +Pull the Docker image. + +``` +docker pull ihmily/image-matting:latest +``` + +After the image is pulled, run the container. + +``` +docker run -p 8000:8000 ihmily/image-matting:latest +``` + +Alternatively, you can build the image yourself. + +``` +docker build -t image-matting:latest . +docker run -p 8000:8000 image-matting:latest +``` + +Lastly, visit http://127.0.0.1:8000 to perform online image matting. Feel free to choose the method that suits your preference. + +  + +## Use API + +Please run it before use API + +File upload + +``` +import requests + +server = "http://127.0.0.1:8000" +image_path = "image.png" +model_name = "universal" # people,universal +files = {"image": (image_path, open(image_path, "rb"))} +data = {"model": model_name} +response = requests.post(server+'/matting', files=files, data=data) +print(response.text) +json_data = response.json() +image_url = json_data['result_image_url'] +mask_url = json_data['mask_image_url'] +print("image_url:", server + image_url) +print("mask_url:", server + mask_url) +``` + +Url upload + +``` +import requests + +server = "http://127.0.0.1:8000" +image_url = "http://your-image-url/demo.png" +data = {"image_url": image_url, "model": "universal"} # people,universal +response = requests.post(server+'/matting/url', json=data) +print(response.text) +json_data = response.json() +image_url = json_data['result_image_url'] +mask_url = json_data['mask_image_url'] +print("image_url:",server+image_url) +print("mask_url:",server+mask_url) +``` + +You can freely choose the method you want to upload from above.If you want to get the cropped cutout, you can call `crop_image_by_alpha_channel` function. 
+ +  + +## Extended Gallery + +![image-3](https://github.com/ihmily/image-matting/blob/main/assets/image-3.png) + +![image-4](https://github.com/ihmily/image-matting/blob/main/assets/image-4.png) + +  + +## References + +[https://modelscope.cn/models/damo/cv_unet_universal-matting/summary](https://modelscope.cn/models/damo/cv_unet_universal-matting/summary) + +[https://modelscope.cn/models/damo/cv_unet_image-matting/summary](https://modelscope.cn/models/damo/cv_unet_image-matting/summary) diff --git a/robot_painting/image-matting/app.py b/robot_painting/image-matting/app.py new file mode 100644 index 0000000000000000000000000000000000000000..d16ccb028a41b0b4bb5999b1b8ed022b8d08cc45 --- /dev/null +++ b/robot_painting/image-matting/app.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- + +import sys +import os +import uuid +from datetime import datetime +import httpx +from fastapi import FastAPI, File, UploadFile, Form, HTTPException, Request, status +import cv2 +from modelscope.pipelines import pipeline +from modelscope.utils.constant import Tasks +from modelscope.outputs import OutputKeys +import numpy as np +from starlette.staticfiles import StaticFiles +from starlette.templating import Jinja2Templates +os.environ["CUDA_VISIBLE_DEVICES"] = "-1" +app = FastAPI() + +model_paths = { + "universal": {'path': './damo/cv_unet_universal-matting', 'task': Tasks.universal_matting}, + "people": {'path': './damo/cv_unet_image-matting', 'task': Tasks.portrait_matting}, +} + +default_model = list(model_paths.keys())[1] +default_model_info = model_paths[default_model] +loaded_models = {default_model: pipeline(default_model_info['task'], model=default_model_info['path'], device='cpu')} + + +UPLOAD_FOLDER = "./upload" +OUTPUT_FOLDER = "./output" + +os.makedirs(UPLOAD_FOLDER, exist_ok=True) +os.makedirs(OUTPUT_FOLDER, exist_ok=True) + + +class ModelLoader: + def __init__(self): + self.loaded_models = {default_model: loaded_models[default_model]} + + def load_model(self, model_name): + if model_name not in self.loaded_models: + model_info = model_paths[model_name] + if not model_info: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid model selection") + model_path = model_info['path'] + task_group = model_info['task'] + + self.loaded_models[model_name] = pipeline(task_group, model=model_path, device='cpu') + return self.loaded_models[model_name] + + +model_loader = ModelLoader() + + +def get_filename(): + filename = uuid.uuid4() + original_image_filename = f"original_{filename}.png" + image_filename = f"image_{filename}.png" + mask_filename = f"mask_{filename}.png" + return original_image_filename, image_filename, mask_filename + + +# remove excess transparent background and crop the image +def crop_image_by_alpha_channel(input_image: np.ndarray | str, output_path: str): + img_array = cv2.imread(input_image, cv2.IMREAD_UNCHANGED) if isinstance(input_image, str) else input_image + if img_array.shape[2] != 4: + raise ValueError("Input image must have an alpha channel") + + alpha_channel = img_array[:, :, 3] + bbox = cv2.boundingRect(alpha_channel) + x, y, w, h = bbox + cropped_img_array = img_array[y:y + h, x:x + w] + cv2.imwrite(output_path, cropped_img_array) + return output_path + + +def process_image(image_bytes: bytes): + img = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_UNCHANGED) + final_img = convert_image_to_white_background(image=img) + if final_img is None: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid image") + return 
final_img + + +def convert_image_to_white_background(image_path: str = None, image: np.ndarray | None = None): + try: + if image_path is not None: + img = cv2.imread(image_path, cv2.IMREAD_UNCHANGED) + elif image is not None: + img = image + else: + raise ValueError("Either image_path or image must be provided.") + + if img.shape[2] == 4: + alpha_channel = img[:, :, 3] + rgb_channels = img[:, :, :3] + + alpha_channel_3d = alpha_channel[:, :, np.newaxis] / 255.0 + alpha_channel_3d = np.repeat(alpha_channel_3d, 3, axis=2) + + white_background_image = np.ones_like(rgb_channels, dtype=np.uint8) * 255 + + foreground = cv2.multiply(rgb_channels, alpha_channel_3d, dtype=cv2.CV_8UC3) + background = cv2.multiply(white_background_image, 1 - alpha_channel_3d, dtype=cv2.CV_8UC3) + + final_img = cv2.add(foreground, background) + else: + final_img = img + return final_img + except Exception as e: + print(f'Error: {e}') + return None + + +@app.post("/switch_model/{new_model}") +async def switch_model(new_model: str): + if new_model not in model_paths: + return {"content": "Invalid model selection"}, status.HTTP_400_BAD_REQUEST + model_info = model_paths[new_model] + + loaded_models[new_model] = pipeline(model_info['task'], model=model_info['path']) + model_loader.loaded_models = loaded_models + return {"content": f"Switched to model: {new_model}"}, status.HTTP_200_OK + + +@app.post("/matting") +async def matting(image: UploadFile = File(...), model: str = Form(default=default_model, alias="model")): + try: + image_bytes = await image.read() + img = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_UNCHANGED) + + if model not in model_paths: + return {"content": "Invalid model selection"}, status.HTTP_400_BAD_REQUEST + + selected_model = model_loader.load_model(model) + original_image_filename, image_filename, mask_filename = get_filename() + cv2.imwrite(os.path.join(UPLOAD_FOLDER, original_image_filename), img) + + final_img = convert_image_to_white_background(image=img) + if final_img is None: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid image") + result = selected_model(final_img) + + cv2.imwrite(os.path.join(OUTPUT_FOLDER, image_filename), result[OutputKeys.OUTPUT_IMG]) + cv2.imwrite(os.path.join(OUTPUT_FOLDER, mask_filename), result[OutputKeys.OUTPUT_IMG][:, :, 3]) + + response_data = { + "code": 0, + "result_image_url": f"/output/{image_filename}", + "mask_image_url": f"/output/{mask_filename}", + "original_image_size": {"width": img.shape[1], "height": img.shape[0]}, + "generation_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + } + return response_data + except HTTPException as e: + return {"error": str(e)}, e.status_code + except Exception as e: + return {"error": str(e)}, status.HTTP_500_INTERNAL_SERVER_ERROR + + +@app.post("/matting/url") +async def matting_url(request: Request, model: str = Form(default=default_model, alias="model")): + try: + json_data = await request.json() + image_url = json_data.get("image_url") + except Exception as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"Error parsing JSON data: {str(e)}") + + if not image_url: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Image URL is required") + + try: + async with httpx.AsyncClient() as client: + response = await client.get(image_url) + response.raise_for_status() + img_array = np.frombuffer(response.content, dtype=np.uint8) + img = cv2.imdecode(img_array, cv2.IMREAD_UNCHANGED) + except httpx.RequestError as e: + raise 
HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"Failed to fetch image from URL: {str(e)}") + + if model not in model_paths: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid model selection") + + selected_model = model_loader.load_model(model) + original_image_filename, image_filename, mask_filename = get_filename() + cv2.imwrite(os.path.join(UPLOAD_FOLDER, original_image_filename), img) + + final_img = convert_image_to_white_background(image=img) + if final_img is None: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid image") + result = selected_model(final_img) + + cv2.imwrite(os.path.join(OUTPUT_FOLDER, image_filename), result[OutputKeys.OUTPUT_IMG]) + cv2.imwrite(os.path.join(OUTPUT_FOLDER, mask_filename), result[OutputKeys.OUTPUT_IMG][:, :, 3]) + + response_data = { + "code": 0, + "result_image_url": f"/output/{image_filename}", + "mask_image_url": f"/output/{mask_filename}", + "original_image_size": {"width": img.shape[1], "height": img.shape[0]}, + "generation_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + } + + return response_data + + +templates = Jinja2Templates(directory="web") +app.mount("/static", StaticFiles(directory="./web/static"), name="static") +app.mount("/output", StaticFiles(directory="./output"), name="output") +app.mount("/upload", StaticFiles(directory="./upload"), name="upload") + + +@app.get("/") +async def read_index(request: Request): + return templates.TemplateResponse( + "index.html", { + "request": request, + "default_model": default_model, + "available_models": list(model_paths.keys()) + }) + + +if __name__ == "__main__": + import uvicorn + + default_bind_host = "0.0.0.0" if sys.platform != "win32" else "127.0.0.1" + uvicorn.run(app, host=default_bind_host, port=8001) diff --git a/robot_painting/image-matting/assets/image-1.png b/robot_painting/image-matting/assets/image-1.png new file mode 100644 index 0000000000000000000000000000000000000000..f34bc720b07324aacba9154c9c9149a7de6110ab Binary files /dev/null and b/robot_painting/image-matting/assets/image-1.png differ diff --git a/robot_painting/image-matting/assets/image-2.png b/robot_painting/image-matting/assets/image-2.png new file mode 100644 index 0000000000000000000000000000000000000000..959242189d57fe074eca9aecac89149191d36642 Binary files /dev/null and b/robot_painting/image-matting/assets/image-2.png differ diff --git a/robot_painting/image-matting/assets/image-3.png b/robot_painting/image-matting/assets/image-3.png new file mode 100644 index 0000000000000000000000000000000000000000..d9a56b7f318e3a0ed8e2af6fee266e2b378ce207 Binary files /dev/null and b/robot_painting/image-matting/assets/image-3.png differ diff --git a/robot_painting/image-matting/assets/image-4.png b/robot_painting/image-matting/assets/image-4.png new file mode 100644 index 0000000000000000000000000000000000000000..ca3f8b100b8357719497c1b43f07a4e561806d85 Binary files /dev/null and b/robot_painting/image-matting/assets/image-4.png differ diff --git a/robot_painting/image-matting/requirements.txt b/robot_painting/image-matting/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f88b00463c14bfbd5d1ef43b3f5c778d2748015b --- /dev/null +++ b/robot_painting/image-matting/requirements.txt @@ -0,0 +1,13 @@ +fastapi==0.108.0 +httpx +jinja2 +modelscope>=1.10.0 +opencv-python +python-multipart +sentencepiece==0.1.* +tensorflow +torch==2.1.2 +transformers==4.36.2 +uvicorn + + diff --git 
a/robot_painting/image-matting/web/index.html b/robot_painting/image-matting/web/index.html new file mode 100644 index 0000000000000000000000000000000000000000..6d3a8b860b2cbe543ae224051837d0735e3434a9 --- /dev/null +++ b/robot_painting/image-matting/web/index.html @@ -0,0 +1,116 @@ + + + + + + + Simple Image Matting + + + + +
+

Simple Image Matting

+
+ Fork me on GitHub +
+
+ + + + + + + +
+ +
+  +  +  +
+
+ +
+

© 2024 Hmily. All rights reserved.

+
+ + + + + + diff --git a/robot_painting/image-matting/web/static/css/style.css b/robot_painting/image-matting/web/static/css/style.css new file mode 100644 index 0000000000000000000000000000000000000000..ee5f21e6c56145aaaa02d9843a3a07060be76e8e --- /dev/null +++ b/robot_painting/image-matting/web/static/css/style.css @@ -0,0 +1,53 @@ +body { + font-family: 'Arial', sans-serif; + background-color: #f4f4f4; + margin: 0; + padding: 0; +} + +header { + background-color: #333; + color: white; + padding: 1em; + text-align: center; +} + +main { + max-width: 1200px; + margin: 2em auto; + background-color: white; + padding: 2em; + box-shadow: 0 0 10px rgba(0, 0, 0, 0.1); + text-align: center; +} + +#upload-form { + text-align: center; + margin-bottom: 2em; +} + +#images-container { + display: flex; + justify-content: space-between; + align-items: center; + margin-top: 2em; /* Add spacing */ +} + +#original-img, +#mask-img, +#result-img { + max-width: 30%; + height: auto; + border: 1px solid #ccc; + margin: 0.5em; /* Add spacing */ +} + +footer { + background-color: #333; + color: white; + text-align: center; + padding: 1em; + position: fixed; + bottom: 0; + width: 100%; +} \ No newline at end of file diff --git a/robot_painting/image-matting/web/static/images/forkme_right_gray_6d6d6d.png b/robot_painting/image-matting/web/static/images/forkme_right_gray_6d6d6d.png new file mode 100644 index 0000000000000000000000000000000000000000..bbccd4b346beeefb797b5437ccc20cf90428cc58 Binary files /dev/null and b/robot_painting/image-matting/web/static/images/forkme_right_gray_6d6d6d.png differ diff --git a/robot_painting/qmupd_vs/.gitignore b/robot_painting/qmupd_vs/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..34e5c58d3f91f2a6d05dd5885a1ca70e577c0746 --- /dev/null +++ b/robot_painting/qmupd_vs/.gitignore @@ -0,0 +1,47 @@ +.DS_Store +debug* +datasets/ +checkpoints/ +robot_data/ +results/ +build/ +dist/ +*.png +torch.egg-info/ +*/**/__pycache__ +torch/version.py +torch/csrc/generic/TensorMethods.cpp +torch/lib/*.so* +torch/lib/*.dylib* +torch/lib/*.h +torch/lib/build +torch/lib/tmp_install +torch/lib/include +torch/lib/torch_shm_manager +torch/csrc/cudnn/cuDNN.cpp +torch/csrc/nn/THNN.cwrap +torch/csrc/nn/THNN.cpp +torch/csrc/nn/THCUNN.cwrap +torch/csrc/nn/THCUNN.cpp +torch/csrc/nn/THNN_generic.cwrap +torch/csrc/nn/THNN_generic.cpp +torch/csrc/nn/THNN_generic.h +docs/src/**/* +test/data/legacy_modules.t7 +test/data/gpu_tensors.pt +test/htmlcov +test/.coverage +*/*.pyc +*/**/*.pyc +*/**/**/*.pyc +*/**/**/**/*.pyc +*/**/**/**/**/*.pyc +*/*.so* +*/**/*.so* +*/**/*.dylib* +test/data/legacy_serialized.pt +*~ +.idea +txt_output/* +vo/* +*.xlsx diff --git a/robot_painting/qmupd_vs/.ipynb_checkpoints/operator_main-checkpoint.ipynb b/robot_painting/qmupd_vs/.ipynb_checkpoints/operator_main-checkpoint.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..262e64d7e3e3c6229217103081b3e0f1586da746 --- /dev/null +++ b/robot_painting/qmupd_vs/.ipynb_checkpoints/operator_main-checkpoint.ipynb @@ -0,0 +1,589 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'ipywebrtc'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in 
\u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mipywebrtc\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mCameraStream\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mImageRecorder\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mIPython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdisplay\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdisplay\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mPIL\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mImage\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mio\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'ipywebrtc'" + ] + } + ], + "source": [ + "from ipywebrtc import CameraStream, ImageRecorder\n", + "from IPython.display import display\n", + "import PIL.Image\n", + "import io\n", + "import numpy\n", + "import cv2\n", + "from ipywebrtc import CameraStream\n", + "camera = CameraStream.facing_user(audio=False, constraints={\n", + " 'facing_mode': 'user',\n", + " 'audio': False,\n", + " 'video': { 'width': 512, 'height': 512 }\n", + "})\n", + "display(camera)\n", + "recorder = ImageRecorder(stream=camera)\n", + "display(recorder)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAicAAADCCAYAAACSRmLFAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAADBJklEQVR4nOydd3hUZfq/7+kzyWTSe28EQiCU0EITkCoWEHQtYFt3ddW1Ytvvquuuva5rXXVxERVlsaKAVOkt1JCQhIT0nkmbTJ85vz9yzfkxJoEEqXru6+LSmXPmnDOT97zneZ/yeWSCIAhISEhISEhISFwgyM/3BUhISEhISEhInIhknEhISEhISEhcUEjGiYSEhISEhMQFhWScSEhISEhISFxQSMaJhISEhISExAWFZJxISEhISEhIXFBIxomEhISEhITEBYVknEhISEhISEhcUEjGiYSEhISEhMQFhWScSEhISEhISFxQnDfj5K233iIhIQGtVsuoUaPYvXv3+boUCYk+IY1diYsVaexKXCycF+Pk888/54EHHuDJJ59k3759ZGZmMn36dOrr68/H5UhI9Bpp7EpcrEhjV+JiQnY+Gv+NGjWKESNG8OabbwLgdruJjY3lnnvu4dFHHz3l591uN9XV1fj5+SGTyc725Ur8ShEEgfb2dqKiopDLe2enS2NX4kJAGrsSFyu9HbvKc3hNANjtdnJycnjsscfE9+RyOZdeeik7duzo9jM2mw2bzSa+rqqqIj09/axfq8Rvg4qKCmJiYk65nzR2JS40pLErcbFyqrF7zo2TxsZGXC4X4eHhXu+Hh4dz9OjRbj/z3HPP8be//a3L+xUVFRgMhrNynRK/ftra2oiNjcXPz69X+0tjV+JCQRq7Ehcr33zzDQsXLjzl2D3nxsnp8Nhjj/HAAw+Irz03psFgkG4SiV/M2XRRS2NX4mwijV2Jiw0fHx/g1GP3nBsnISEhKBQK6urqvN6vq6sjIiKi289oNBo0Gs25uDwJiR6Rxq7ExYo0diUuNs55tY5arWb48OGsX79efM/tdrN+/XrGjBlzri9HQqLXSGNX4mJFGrsSFxvnJazzwAMPcNNNN5GVlcXIkSN5/fXX6ejo4JZbbjkflyMh0WuksStxsSKNXYmLifNinFx77bU0NDTwxBNPUFtby5AhQ1i9enWXZC0JiQsNaexKXKxIY1fiYuK86Jz8Utra2vD396e1tVVKzJI4bc7HOJLGrsSZQBq7EhcrK1asYN68eaccR1JvHQkJCQkJCYkLCsk4kZCQkJCQkLigkIwTCQkJCQkJiQuKi0KETeLc4HA4OHbsGIWFhQQGBpKenk5wcLDUR0NCQkLiLCAIAnV1deTm5lJVVYVerycpKYm0tDRRrOy3imSc/MawWq0oFApUKhWCIGCxWMjPz2fTpk389NNPbNq0ifb2dtRqNWFhYVx33XXMmzePiooKhg0bRmJi4vn+ChISEhIXDVarlerqarRaLQEBATgcDmpraykpKaGoqAiZTEZ8fDzJycmYTCYOHDjAtm3biI2NpbW1lbS0NEaMGNHrBo+/FiTj5FeKIAg4HA6MRiM1NTWUlpaye/duVq9eTXBwMMnJybS3t7N//36qqqpob28HIDQ0lAkTJqBQKMjJyeHll1/m9ddfx8fHh4iICP7v//6Pfv36kZGR8Zu37CUkJCQ8CIKA0WjEbDZTVVWF0Wikvr6e5uZmWltbvRaFdrud/v37ExUVRUZGBocPH+bbb7/F6XSSlZVFamoqLS0tVFZW0tLSQm5uLmFhYQwcOJCEhITfhKEiGSe/IhwOBwUFBezcuZOVK1fS0NBAQUEBFosFs9nste+JSpHh4eFERkYybdo0XC6X2AgsNDSUqVOn8tlnnxEbG0t+fj4LFixAq9UyZMgQ7rvvPi677DL0ev05/Z4SEhISFwp1dXUcPHiQsrIympqa0Ol0hIWF4ePjQ0xMDK2treh0OqxWK1arlbi4
OKxWK3V1dbS2trJmzRrkcjk+Pj4IgkBxcTE7d+4kOzsbq9WK2+0WDZUDBw4QHx/P2LFjSUxM/FUbKZJx8ivAZDKRk5PD66+/ztq1azGbzQiCgEqlIiQkhMGDB6NQKNi0aRNOp9Prs2PHjmXo0KGsWrWKlStXUlFRgcvlErdXVVXhdDrJzc0V37NarezcuZMbb7yRrKws7r//fubMmYNKpTpn31lCQkLifFJWVsa+ffvIz8/H5XKh0+nQaDQMHjyYsrIyamtraWtrQ61WExoaiiAI+Pr6UltbS2NjI/X19cTGxhIaGopGo0EmkyGXywkLC8NkMlFVVUVkZCTt7e3Y7Xbkcjl+fn7U1dWxZMkSUlNTueKKK361mjOScXIR43A42LFjB08//TRbt27FZrMBoFAoGDduHA888ABpaWkcOnSITz75xMvoAIiKimL06NG89dZbWK3Wbs+RmJhIYGAghYWFXbYplUoSExO59dZbycvL4/7778ff3//Mf1EJCQmJC4S6ujp27NhBbm4uZrOZIUOGkJiYyJ49ezAajZSXlwOd87NnTtbr9YSEhLBt2zYUCgVtbW1ER0cDnQ0WPYvJ8PBwGhoakMvlNDU1YbFYiIiIwGKxoFAosNls2Gw2HA4HarWaFStWMGnSJBISEs7Xz3HWkIyTixCn08n27dt57bXX+PHHHzGbzajVapRKJYGBgTz66KNcf/31fP755zz88MMcO3asi2Gi1+uRyWT861//wm6393iu0tJSWltbu91ms9nQaDT84Q9/4IUXXiAnJ4dnn32WjIwMqcJHQkLiV0VLSwubNm0iLy8Ph8OB2+0WwzAFBQWEhYVxySWXcPjwYerr64HOhos6nY7AwEB27tyJWq3G399f9GDLZDJUKhUKhQKXy0VNTQ0ulwsfHx/8/PxobW2lurqaoKAgGhoaEAQBmUyGWq2mtLSUpKQk1q9fT3x8POPGjUOr1Z7Pn+iMIhknFxnNzc28+OKL/POf/xSt6WnTprFo0SI0Gg2RkZGEhoZyzz33sHTpUnrqTmCxWOjo6Ohxu4f6+voeE18FQWDJkiXMnTuXv/71r7z66qtMnDiRhx56iHvuuQc/P79f/H0lJCQkzjfFxcWsWrWKjo4OwsPDaWpqwul0iguxsLAwoqOj+fbbb2lpaUGhUOB2u8VQTUFBAS6XC7fbjVwuF/9FRUWhVqtpaWnB4XDQ1tYGdHq/9Xo9ra2tuN1umpqagE5vtWeh2dzczO7duxk/fjwmk4mPP/6YyZMn/2pyUSTj5CKivr6em266iTVr1iAIAqGhoTzyyCP88Y9/FJNSbTYbf/zjH09qmABdPCknw+12n3Tb//73P8rLy3nqqaf48MMP+cc//sG3337Lfffdx+zZs6WEWQkJiYuWwsJCvv76a4KCgsjIyGD//v0EBAQwffp04uPjgc5QzyeffILdbkelUiGXy9FoNCgUCpTKzsesp4zY6XQiCAIKhQKZTEZxcTHQ6c1OT0+nuLgYm81GVVWVeA0ej0lKSgrNzc3U19ej0WgA2LNnDyEhIURGRrJq1Sr8/f255JJLiI2NPce/1Jnl4jevfiPU19ezcOFCVq9ejUKhIC0tjU8++YQHH3zQ6+G/du1aPvvsMwRBwMfHRxzAP0ej0TBv3rxelQP3lI9yIrt37+bNN99k5syZPPbYY9TW1nLjjTdyzTXXcPDgwVN6aCQkJCQuNAoLC/nmm28IDAyksbGRzZs3k5qaysKFC0XDRBAENm3ahMvlYujQoTidTux2OyaTiba2NpqammhqaqKlpUV8z1NOXFFRgSAICIKAyWSiqKiIgIAAxowZg4+PD4GBgaIRIwgCRUVFBAcHExwcjFKpFMPnx44dY/PmzURGRpKQkMCyZcvYsGEDJpPpfP58vwjJOLkI8Bgma9asISUlheXLl7N9+3amTp3qtZ/b7ebTTz/FbreTkJDAnXfe2cX48LgS//Of//Dhhx8SFxcnbuvXrx9Tpkw57essKCjg+eefZ8mSJTzyyCMMHz6cVatWMX36dB555BFKSkokI0VCQuKi4ETDpLm5mbi4OObMmcPMmTO9cjtaWlqoqqoiJiaGPXv2UFtbi9vtFj0ogBjeSUlJYdq0aQiCgEajQa1W43a7sdvtCIKA2+2mtraWgwcPYrPZaG1t9fJyu1wu8vLy8PPz45JLLkEul6NWqxk2bBhZWVkcOnSI7du3k5iYiMlk4qOPPmLr1q1dpCQuBiTj5AKnqampi2Fy1VVXERQU1GXf0tJS1q1bx5w5c1i9ejXFxcU0NzcDnTfH1Vdfzbp169iwYQPXX389Go1GLENTq9W8+OKL/PGPfxT395S3RUREkJaWJt5onsTb7rj88svRarU8++yzzJs3j9tvvx2Hw8FLL71EdnY2jzzyCMePHz8bP5WEhITEGaGoqIivv/6awMBAjEYjY8aM4brrrmPQoEEoFAqvfXNzc/H19cVsNjN48GAiIyNRqVTExcWJya1paWlMmDCB6667jpCQENFQcTqdDBgwgMTERHQ6HQEBAchkMjo6OnA6nbhcLtFrIpPJxH8eUU2ZTIbL5SI3NxdBEBgwYABms1l8PX78ePLy8njvvffYvXv3RbU4lIyTCxi3280rr7zCjz/+KBomQ4YM6XH/oKAg/vOf/7BkyRIANm7cCHR6S66++mo++OADJk2aRFpaGtAZ2pk5cyYAqampTJ48mfz8fAAmTpzIunXrePvtt1m+fDm7du3ikUceQS6XM3DgQL7++muSkpK6VOXs3LlTzIF58skncTqdvPTSS/z+97+nra2Nl156iRkzZnDgwIEz/GtJSEhI/HLa29tZs2YNdrudhoYGxowZw7hx43qsQIyIiCAsLIxp06aJVZAxMTEUFxfj6+vLNddcw7XXXsvEiRNRKBRERUUREBBAcXExbrebgQMHYrVaGTt2LCaTiYEDBxIVFcWQIUOYN28ewcHBREVFodPpSE5ORi6XI5PJaG1tpX///gwaNAg/Pz+OHz+O3W4nIyMDt9vNxo0bxQRdpVJJTk4O27Ztu2gMFMk4uYDZvHkzb7/9NsnJyac0TKAz4cqTgPrZZ5+Jg/df//oXH374IQEBAV3KhrOyslAoFNxxxx0olUq+++47AK655hrGjRvHHXfcwbhx4/D39+f+++8nKiqKtLQ0xo0bx9q1a1m8eDGRkZHijVtfX8/TTz/N+PHjcbvdLF68mHvuuYfY2FiuvPJKoLNS6MYbb5Q8KBISEhccGzduJCUlhcjISMaNG3dSwwQ6F3Zz5swhPDyc3NxcAgICqKysJDIykmuuuYaYmBiqq6tFo0CtVhMUFERUVBSRkZFotVqcTicqlQqDwcDVV1/NbbfdxpVXXklGRgbZ2dnY7XaUSiUzZszg0ksvJTY2FoVCQV5eHrW1tQwdOhSNRsPhw4cpKysjIiJC9OC0tLQA0NbWxu7du9m/f/+5+Bl/MZJxcoHS3NzMokWLMBgMfPHFFz0aJm1tbd1W0yiVSm6++WZ+/PFH/vSnP2EwGGhsbOSZZ57x2i8jI4OwsDCmTJk
iCgvJZDJ8fX27HFOn0+Hj48OVV16JXC4nKSmJm266iZ07d/Lcc88RGRkp1uOvXLmSAQMGAGA2m3nllVcYOnQoer2e/v37c9ttt4ky+RISEhIXAgUFBdTX19PW1kZqamq3hoknMfXnyaaCIBAeHo4gCFx22WXceuutxMXFkZ+fz44dO7z0ohITE0lOTiYlJYXDhw9jsVj46aefUKlUyGQyMQkWOufd0NBQwsLCCA4OZsyYMSxYsICpU6cSGhpKfX09hw4dIjw8HIPBQGtrK1arFYPBgN1ux2g0EhISgsvlQi6XU1tbe1F4TyTj5ALE7Xbz0ksvcfToUZ555hmGDh3a7X55eXncd9993VbTLFq0iP/85z9e5WTV1dWsW7cOi8UivqfVasUbYd++fVitVkJCQhg7dmyXYzY1NdHa2kp4eLjX+3FxcTz88MPs3r2bd955B4VCQU1NjVfopq2tjU8++YSMjAw2bNjAnj17GDlyZF9/GgkJCYmzgslkYuPGjQQHByMIAqNHj+5imDgcDvbu3Ut5eTl1dXVe2zQaDWPHjuXmm29m2LBhYglxQ0MD/fr1w2g0ivuq1Wri4uKQyWRUVVWhVCqx2Wz079+/y3V5lGL9/PzE61EqlYwZM4ZbbrmFmTNnEhAQQHNzM+np6SiVStrb20WhuMDAQMrLy0U9lMTExItCJFMyTi4w3G43//73v/nnP//JrFmzuO6667rd74cffuCyyy5j4sSJ3ZYDe5JZT0QQBKqqqrwyt+12exfPyzXXXENSUlKXYzY0NGA0Glm+fDkOh8NrmyfOmp2dTUBAQLfXfPjwYWpra3G5XCxbtozbbrvN64aVkJCQOB9YLBZWrlxJeno65eXlTJkyBbVa7bWP2+3m+++/Z+PGjXR0dHQrGR8TE9NtCw9/f38xvAKd825VVZXowRg9ejQajYbhw4d3+WxTUxMajYaKigqxwMHzOa1Wy6hRoxg2bBgdHR1UVlaKxRLNzc0oFAp0Oh1qtZqKigr8/Pz44YcfKCkpOa3f6VwiGScXGHv27OHxxx9Hp9Px0EMPidb3iRQUFLBo0SIGDhzYo/HSHfv37yckJASdTie+d+jQIVGG3nPzDBo06KSW9ZIlS9i5c2e3hkpERASpqalA5+rglltuEbPbZTIZlZWVpKenExERwTfffMOtt94qGSgSEhLnDUEQ+Omnn/D396ejo4P+/fsTEhLSZZ+9e/eKyaUjRozo9fEbGhpYt26dl9FSWVnJTz/9hNvtxu1243A4CAgI6FGXymQyYTab+emnnzh+/DiffPKJl7Hj8ZB7jCDPAtRgMCCXyxkyZAiVlZW0t7cTEBDAV199dcEbKJJxcgFRV1fHQw89RHNzM7///e/Jysrqso/JZOKuu+6irq6Op59+GqvVyoYNGwC84ohut1tsOgWd9fHr168nKirKy9PicrmIjY0lMjJSLE1LSUnp9vqsViuCIIhdNpcsWcLChQvZtm2buI9MJhMNKo1Gw7333itWB4WEhDB+/Hhqa2uZO3cuMpmMb7/9lqeffrpLt2QJCQmJc0FhYaEY0i4uLmb8+PFd9vF0IPb19WXw4MG4XC5++OEHHA4Hra2t4txrMpnEvjoARqORqqoqWlpavDzK/v7+TJ06FR8fH5xOJ0qlEoPB0KVMGTp7qUVGRhIYGIivry/ff/+9OJ/D/1ePhc5wj9vtJjExUTyPw+Fg8+bNBAUFUVFRgd1uJywsjG+++UaUy78QkYyTCwSz2cwtt9zC1q1bSU5O5o477ujWe/Hxxx+Tk5PD66+/TmZmJs888wybNm2ioaGBxx9/XKzGKS8v5y9/+Yso4FNeXs7q1au7CLdBZ3KW1WqluroavV7fJafEw7p163A6naSkpJCUlITZbMZqtfLNN98gCAIrV65kw4YNXoaGXq8XlRRbW1sJCAjAaDQSERGBr68vgiDw/vvvs3PnToCTNiGUkJCQOJOUl5fz9ddfYzAYyM3NZeTIkV3C5E6nk7Vr1+Lj44NcLmfo0KGUlpbi4+NDQUEBW7ZsobKyEoAjR45w4MABsRfOkSNH6OjoQK/Xe3lOZDKZqGPi5+dHS0sLwcHBXa7PoyIrl8sxmUwkJSXhdDpJT0+noqKClpYWvv/+e1avXo3b7RaNm6CgIDGkExgYKBpHHR0dNDQ0MHjwYGw2Gxs3bkQQBCorKy+4BaJknFwAOBwOnn32WX788Uf8/Px49tlnu41nNjc3s2TJEv71r39xww034HQ6+fHHHxk+fDgvvPAC69atw2w2YzQayc/P5+OPPxb7M3z22WfI5XKuuOKKLsdNTk7GYrFQU1ODn5+f2Mr7RARB4NChQwCMHDkSlUolbtu5cycOh4OlS5cyd+5cr1I1hULBgw8+SHBwMDabja+//lrc5jG+zGYzTzzxBCaTiXvuuYctW7ac1u8oISEh0Vuam5v54Ycf8PX1xWKx0K9fP4YNG9Zlv2PHjhEQEIBWq2X27Nm0trYSGhpKU1MTW7Zsobq6mrKyMvLz88X/5ufn43A4OHjwIGq1WkxUPZG2tjZcLhcGg4Hm5uYuoSTo1FxxOp34+Pggk8lEAyYkJARBECgrK+PYsWP4+fmhUChEAyM0NJTMzEwqKysZN24cycnJNDQ0AODr68uuXbvw9/cnPz+fnJwcioqKyMnJOdM/8S9CMk4uAL7++mtefvllZDIZL7/8MvPnz+92P5fLxZNPPskNN9wgPtgDAwPp168fa9asISsriz179jBlyhR27txJW1sbzc3N2O12vv/+e/r160dUVFSXYwqCwPbt22lra2PAgAE9dhNOSUnBz8+PhQsXIpPJMBgM4goAEEWETgwnAUyZMoXf/e53gHfoKTQ0lLi4OPr378/OnTvF8NTixYsvilI3CQmJixOHw8GaNWtoaWmho6OD0NBQrrzyym5z/JxOJ+Hh4Vx55ZVoNBoEQaC1tRW73U5dXR0tLS2UlpayZcsWjh49itVqpa2tjZqaGmw2G3K5vNvFptvtpri4mNjYWJqbm4mIiOiyj1qtRq/XU1tbS3JyMgaDAY1Gg9VqRS6X4+fnh1qtpr6+nvj4eLG8WS6XM2nSJARBYOPGjQQEBNDS0oJer8fHxwebzYbZbGbEiBEcPHiQyspKDh48eEGFeSTj5DxTWlrKfffdh8Ph4Pbbb2fBggU9JqOGhIQwY8YMr+1DhgwRk6GKiopEKeN//OMfOJ1OysvLqaqqorCwkNmzZ3t5PAC2bt2KSqXik08+wel00q9fv25vUJlMxlNPPcUPP/wglgCPHDkSpVJJY2MjbW1tzJgxg1GjRnn1nfDw5JNPimq0AAcPHmTw4MHU1tYyaNAgrFYrP/74I+PHj+enn36SkmQlJCTOGnv27CEvLw+lUolOp2PWrFndzlvQqQU1fvx4MVlVJpNRVlZGfHw8vr6+qNVqGhoaUKlUqNVqlEolRqORsrIy3G43Wq3WK1RutVrRarUYjUYaGhqIiYkB6HZR6Ofnx5w5c9DpdEyZMgW5XE5kZCTV1dX4+/vT1tZGRkYGZrOZsrIyL30qvV7PVV
ddhdvtpq6ujtDQUHx8fKipqWHgwIFUVlZSXV0tGmitra0XlDCmZJycRwRB4N///jfV1dVkZ2fzzDPPeFXSnAq1Ws29996LVqslPT2dxsZGRo8ezbhx43C73QQEBJCens4333yD0+lk1qxZXp+32Wzs2LGD4cOHc+TIEZRKJZdcckmP5/Px8WHcuHFij53k5GRSU1MJDw8nICCA1NRUNm7cyGOPPYZcLkcQBC834z//+U+xRPmnn37i2muvJSoqiuXLlyMIAjt37iQzM5P29naKior6+GtKSEhInBqLxcK+ffuIjo7GarUyY8YMwsLCTvqZExeEERERxMTEkJycjFqtJiAgQAyhhIaGYjabiYiIEI2ffv36eS0K6+rqCAsLo6amBn9/f4xGI0FBQV7d5U/E47Xx5MKkpKRQUlJCY2MjQUFBjB07lvnz5xMYGEh9fT0+Pj6iPERKSgrjx4/HZrMRHx8vdjlub28nMzOTmpoaTCaTaFSVl5f/0p/3jCEZJ+eR0tJSPvroIwIDA3nrrbd6bKbXEzKZjPj4eJRKJe+88w5vv/02fn5+YtfMN954A4B//vOfXHnllQwcONDr8263W6yBr6ysZMiQIcyePbvX5w8ODubaa68V+0lAp5rhfffdR3Z2Nh0dHV5CbKmpqbzyyiuoVCoaGhr45ptvePHFF0WD7PDhwxQUFBAdHU1FRUWffgsJCQmJ3rB//37CwsKIjIxk1KhR9OvXr0+f9/HxYcyYMURFRTF16lSxD47H0EhNTUWtViMIAoIgdBGbtNlsmEwm2tvbiYqKYufOnd1WCPVESkqKGC5SKpUoFAqSkpLEeX/MmDGUlpaK+48ePZqEhAQOHjxIv3790Gg0lJWVUVZWJoaHamtrSU9Pv6DE2STj5Dzy/vvvU1NTw/z588nIyPhFxwoKCiI7OxuFQkFWVhYPP/ww8+bNIz8/n5qaGq699lrR4+FBo9Hw4YcfIpPJCA0N5d577+1W0O1kLFq0iA8//NCrBM5gMPDWW2+h1+u7qNfOmDGDq666CoAVK1ZgMpmYO3cu0JmZvmjRIoxGo9gtWUJCQuJM4fGapKWlUVpa+otVqtPT05k2bRpBQUGkpaURGBjI9OnTKS8vx+l0Eh0d3UWUMioqSmz419TURFRUFHFxcb0+p1arZf78+UycONErTyUlJYW0tDR27twpVmlCZ/7J5MmTRfl6X19fXC4XCoUCtVqNRqPBbDZTXFz8i36LM03X5AKJc0JZWZnoNbn77ru7GA6/hMmTJzNp0iRkMhk1NTVMnjy523CNXC4nMTGR4OBgdu/efUrXZnfodDqxVPhE+vfvz4ABA6iurvZ6X6vV8ve//52NGzfS2NjI888/z5tvvsm3335Le3s7paWljB49mkmTJvX5WiQkJCROxoEDBwgNDaWsrIz09PRu1VxPl8mTJ2Oz2dDpdISEhBAQENBtTzS9Xs/o0aMpLi6mtbW120qeUxEeHt6t5EO/fv3Izc3F5XLhcDjEcFJERARjxoxh8+bNDB48mH379uHj44PFYiE4OFjswNwXz/nZRvKcnCdWr15NbW0tV111VZdwy5ngxDDLa6+9dtJcFoPBcFqGyclQKpWEhYV5hXU8JCQkMGbMGACKi4uprq72KnFWKpVn1FiTkJCQ8BQLREdHU1JS0ieV196gUqnQ6/W4XC46OjoICwsTk127Izk5mWHDhvWYiHs6eAyO9vZ2rx5q0OlZUSgUCIJAYGAgSqUSlUqFy+UShTIvpHn3wrmS3xAtLS28/fbbGAwGbrnlli4Dwm63U1pa2qXr5elw4403igPvXCKXyxk/fjxFRUVdbhKNRsOf//xnfHx8cLlc/Otf/+KWW24RXZT5+fmiqJGEhITEmaCwsBCZTIbJZCIlJaWL16S5udlLUO10UavVzJo164wbP70hKCiIkJAQzGYzjY2NXtuioqJITU2loKCA9PR0zGYzOp2O9vZ2wsPDkclkXrkq5xvJODkPrF69mtzcXGbNmtVt99+PPvqIzMxMnn322fNwdWeOq6++GqvVKqq/nsgll1zCuHHjAMjJyeGhhx4SG1Y1NTWxb9++s3593XVzlpCQ+HWya9cu+vXrR15eXpd51+l0smrVKsrKyli9enUXraa+otPpzktyqUKhYPDgwRgMhm7LgseOHYvNZhP76jQ1NaFQKCgpKcHf359jx46d9Wv0NC88FZJxch5YtWoVAHPmzOniNTEajbzxxhu0tbWdUXff+SA5OZkXXniBL774oouomlKp5JprrhFjrQcOHKCwsJAJEyYgk8nOeklbRUWFKAwnISHx66a9vZ3GxkbcbjfBwcFdklSPHj1KUVERdXV1mM3mC6pqpa94uhQrFAqv5oDQmasSHR1NTU0N/fr1IywsDB8fH4qLi0lOTsZsNp81GXuXy8X+/fvZtGlTr/aXEmLPMVarlcLCQiIiIrotH/vyyy/Jz89HqVR261W52JgxYwYjR47s9ma/+uqr+cc//iG6Ep1OJzabjeDgYFEq/0xiMpkoLi7m6NGjPPXUUxw9evSMn0NCQuLCw2g0otFoOH78OBkZGV7zkdvtZufOnajVajo6OkhLS0OtVp/Hq/1lyGQyRo4cSWlpaZeqR7lczogRIygrK6OiooLk5GQOHDiATCajurqa9vZ2rFZrj5orvcHpdOJ2u7FYLAiCQG1tLcePH6ehoYHS0lKvLvYnQzJOzjE1NTUUFhYycuTILkmoLpeLL7/8ErfbzZgxY8Sk0YsZuVzebc8I6OyYecUVV4h6LAC7d+/utXS9Z+CfmNPS3NxMcXExFouFxsZGOjo6yMvLw+12c/jwYcrLy7FaraJIkYSExK+f8vJyUaTs59WFDQ0N1NfX43a7kclkjBo16jxd5ZlDq9XSv3//brclJiZiMBgwmUwUFhYSEhIiGm+nwuVyYbFYyM/P98qJ9DQUtNvtOBwOXC4X7e3tKBQKAgMD8ff3p7y8nPj4eAoLC3v1HSTj5BxTUlJCS0sLY8eO7RLSKSkpET0GV111VZ81Ry42ZDIZt956K++//75oYPTGMGlpaeHjjz8mPz+/S9tvj/elu+OEhYUxaNAgDh48SHBwMH5+fpL3RELiN0BVVRUqlQq5XN6l+69HjdrPzw+5XN4nzZGLEb1ez8CBA9m1axdms5nJkyezY8cOjEbjSRdtx44dY926dbS1tXUpcoCuc7evr6+ofHv8+HFcLhfHjx//bXhOLrQWz71h+/btXq2tPbjdbl588UWqqqoIDAzsIjV/KlwuFzKZ7IIoBXO5XOTl5XHgwAHGjx9PbGxsl+/rITExkfj4+C5GQn5+PjabrVtrPicnh/vuu69P3g9fX18WL17Mpk2bWLRoEePHj8fhcJy01E9CQqIrVVVVaLXasxr6cLvdZ2wuczqd1NbWkpWVRU1NjVdIp7W1le3bt+Pr60tbWxvZ2dm9Pq/b7aa2thatVktgYGC38/q5QhAE6urqyMvLo7i4mIEDB5KUlERYWFi33yc+Pp6dO3cik8lobW0lPj6e/fv3I5PJaGpq6jass2/fP
gRBwG63i+q3Hn7+7NFoNOh0OhoaGlAoFGRkZGC1WkWvzRdffHHK73RRGyfXXnstWVlZjB8/Hr1ej1KpJC0tTUyy9Eizny4ul4tVq1Yxa9asM3KjuN1ucnNzkclkXaTqc3Nz+d///gd0yrwnJyf36pjV1dWsW7eOjz/+mBtvvJHk5GS2b9/OnXfeKbrXQkJCzlqClyAItLe3iwN1165dvPLKK+Tl5VFbW0t4eDhDhw7l5ptv5vLLL+/y9/D19SUhIaGLcdLY2IjD4ejWONm1a1efwzLz588nJCSEtWvX8vjjjxMQEHBBdeCUkLhYKC8vp7q6WlQh9YRuPXNMcHAwISEhp53Q39LSwsaNG5k0aVKXxNXToa2tDUEQ0Gg0aLVar7lw9+7d2O12kpKSaGlp6dW8a7PZOHz4MIcOHUKtVuPn54dCoaCuro4JEyZgs9lwOBykpqb+otyNnnA4HJhMJiorK8V5sKSkhOPHj9PR0UFGRoYo4bB582amTp3a5XkTFBSEQqHA7XbT0NAgphi4XC7MZnOXc3oMPKPRiE6nQ6VSYbVavQwUQRBQq9VERERQU1NDY2MjOp0OPz8/goOD8ff3Jz09vdflyhe1cfLKK69QWFhIR0cH+fn5FBYW4uPjw/HjxzGbzQQHB5OYmMh1112HTqfDx8eH0NBQ5HJ5ryzcXbt28eWXX/bZi9ETLpeLkpISIiMjvUTHoFPK3ZNZfdNNN50y/tfS0sKbb77Jhx9+SFNTEw8//DDDhg1j3rx5FBcXs379esrLy+no6ODqq69m8ODBQKdQ0GWXXdbnPj4eHA4HFRUVWK1WfvjhB/bu3esll9zc3ExHRwcPPvggRqORgoICbr75Zu688062bNnCX/7yF0JDQ8XjKRQKrrjiCtasWeM10BsaGqisrOw2btra2tqna/b39+fWW2/lk08+weFwXPRVUBIS55vKykoUCgUBAQFotVrKysrw9/cXu5QXFRUhl8uRy+X4+PgQEhKCTqcT5QI8236OIAjk5eUREBBwxjzj7e3tyGQyjh49yrBhw0TjxOFwkJubS1hYmLiQ8nR474mysjK+++47HA4HgiBgMBgwGAwcOHCAyMhIvvzyS/Gh7efnJ3YJViqVTJgwgaSkpD6rwULnnLd//37sdjuFhYVi4qoHmUzGgAEDqKyspKKiAovFgkKhoLGxkWXLljFlyhSvHkKhoaFi88Hy8nLRKJPL5VRXVzNgwACv85/oMfGoyvr6+tLc3IwgCPj4+KDVajEYDFRXV+NwOJDJZCQkJFBWVkZhYSHz58/v03e+qI2To0eP8tFHHxEcHExsbCzjxo0jJSVFTHhyu9388MMPrF+/nv3792M2m+no6CAoKIikpCScTieJiYmMGDGChIQEfHx8xJV9e3s7//jHP1i0aNEZcy/W1tZSWlrKJZdc4tUTwel0smXLFqDT23Oqfg8tLS388Y9/ZPfu3cyePZubbrqJgIAAbrnlFgoLCwkMDGTdunWiVf3666+Ln5XJZAwfPpwFCxZw66239tmyf/PNN3nqqadwOp3dWtgevvjiC3x8fGhsbCQ5OZl///vf3HHHHRiNRj744AMvD0pmZiYKhcJrMmpra2PdunXdGieeCa63zJ49m4yMDG6//Xaam5vZvn07EyZM6NMxJCQkOqmurqalpUX0TJeXl+Pr64vT6cTlcuHv709bW5uYDOnJUbDb7djtdpRKpbjKDggIIDg4WJyHKioqUKvVOByOHhPp+0pVVRV+fn7U1dV5ybM3NDTQ3t5OdHQ0x44dY/DgwT0uWgVB4Pjx46xbtw6r1YpCoSA5ORk/Pz927dqFy+WiurqahIQEcZHZ3NxMe3u7eIzPP/+csLAwpkyZQlJSUq+fKyaTiS+++IK6ujpcLheCICCTyUQjy9Mrp6ioCLfbjVKpxNfXV/RouVwu1qxZIwq0QacREhoaSl1dHRaLBbVaTVBQEI2NjRQWFjJ58mQvD5NMJkOj0YhJsM3Nzfj7+xMaGoparRYbHVZUVGC325HJZGi1WqKioigtLUWpVJKTk4O/vz8dHR29+t4XtXGyYMEClEoll19+OSUlJSxfvpz6+nqCgoKQy+UMGDAAvV7P2LFjue6664iPj6ehoQGtVktLSwutra3s3buXFStWUFdXR2BgoJgsVVlZSXR0tCgUdiaw2+1YLBYx8cqD2+0WQwwDBgw4qZz92rVreeaZZ6isrOS7774jNjaWb7/9lieeeEJ0l51M5EYQBPbu3cv+/fv55JNPePPNN3ulZGiz2XjjjTd4/vnnexUOObGr8NVXX83DDz/MSy+9xL333suiRYt4/PHHCQsLQyaTYTAY0Gq1XtnfgiD0mKyalZWFTCbrVfKsXq/noYceoqSkhNLSUmw2G7Nnz+byyy/vdWKWhITE/6ehoYH09HTa2tpwu93YbDY6OjoYPnw4jY2NGI1GIiMjMZlM1NfX43K5qKysRKfT0a9fP5xOJxqNBh8fH3F1X1lZKd7zycnJPVaanA42m00MYXh6zUDnfOx2u6mqqsLhcHTxFpy43/r16ykuLhZTByIiIjhy5AiHDx9Gr9cTExOD0WiktbWVAQMG4Ha7KS4upqGhAY1GQ0REBA6HA4fDwYoVK8jOzu5VJ+La2lq+/fZbampqxDwPnU4nKrsKgkBaWhr19fXY7XZ0Oh0KhQKXy0VVVZVocISFhfHll18ycuRIUlJS0Gq1+Pr6isdbt24dCoUClUpFe3s7HR0dXgtXpVJJaGio6ClxuVxiB+aamhrMZjMGg0E07mQyGZmZmbS0tGCz2QgPD+fo0aMcP3681zIRF7VxIggCN9xwA++8844oS1xcXCyWMe3Zswe73c6aNWv47rvvkMvlHDhwAJ1Ox6effsq0adOYP38+drud2tpaDh06RG5uLgCTJk1i7ty5XoP5TCCTybqs/D0eFegMQXQXdrBYLOzatYtbb72VyspKJk2axKuvvsqOHTsoKiry6kLZG1wuF7t37+a+++7jhRdeYMiQIT16URwOB4sWLeLdd989rQd6SUkJd999N5GRkXR0dPDGG2/www8/MHXqVCIiIhg0aBA6nQ6TyYRcLkelUmGz2cjNze02Ma4v+TPz5s1j0KBBvPHGG6Lqo8Vi6VVCloSERFc8oZdbb70VpVJJZWUl5eXlYlm/QqHAYrEQGhpKcHCw2A3Xbrcjl8u59NJLsdlsNDQ0UFtbS2trq+jl9fX1JSQkpNumdr8Eq9WKSqXyCqlUVlaKi6P29nYxBOPBZDLR0NBATk4OFRUVBAUFUV1djclkoqamhrS0NFpbW5HL5Rw/fpy4uDiMRiPbtm3rcu7jx48TExPDoEGDKCgoIC8vj8bGRtLT0wkPD0epVHaZf+vq6vj8889FL1VKSgp5eXk4nU7a29txu90IgiCqaXu8G57+OWlpaeJ+Op2OyMhINmzYwIYNG0hNTcVoNCKXy4mJiREVY+Pi4igtLaW9vf2UXnVPHx+PB72jo0N8dmk0GrKysli+fDkulwutVit6x3v7rLqojZP58+cza9Ys0VozGAwMHTpU
3O7pbOuxON1uNxs3buR3v/sdX331lagjolariYuLIy4u7px0Zfx52MZms3UrpW61WiktLeW9995j69atHDlyRLz58/LyRO9ERkYGw4YN6/IQz8rK8srvaGtrY8eOHTidTvbt20dHRwf79+8X45FpaWmEhYUxYsQIhg8fjk6nw2az8dJLL/HZZ591a5hERESg1+uRy+UMHjyYgIAARo0addKcFqfTydq1a/nss8+YNm0aW7duxW63A505MYGBgdTW1vb4eY9L81Sek5CQEB544AFxklEqlRdlhZeExIWEJzzgCREEBASQkZEhbvc8NI1Go/gwqqmpwel0cvDgQSZOnIhWqyU2NpbY2FgcDodonHjKfc80dXV1xMbGei38bDab+ED3IAgClZWVFBQUcOTIEVpbW1Gr1YSFhWE0GkXvgdPp5PDhw2J+iVqtFues8PBwr2M6HA6MRqOYR6dUKhkyZAg2m421a9diMplQKpX4+fkRFxeHWq3G6XRy6NAhzGYzAQEBpKWlcfDgQVwul6jH4pn/NBoNcrmcwMBAr3C50WgEICkpCavVSlVVFYIgEBAQgMlkIj4+Xgw7xcfHU1ZWhtls7vH3//mi0N/fXwxhCYKA0+kUPeAjR47EZDJRXV2NUqmkvLyciIgIMXH6V1+tU19f3yuZc8/DTC6XM2XKFCZPnsyePXtwOp2iJV1YWMiyZcswm81otVouvfRSEhISCA0N7ZU4TV/4+R/5+PHj4sPZ7XbT0dHB5s2befXVV9m5cydms1kciOPGjeMvf/kLAwcOFA0AhUJx0q7DJ3LLLbcAnd4Dj/dk+fLlFBQUsGXLFurr63nnnXfQ6XTI5XJR6e9EfHx8GDVqlOh5Cg8PRy6Xo9Vqez2xzJ07l6KiIvz8/PjTn/6ETqfjpZdeYseOHVRVVZ30s6mpqQQHB9PQ0NBlW0pKCqmpqaxatYr58+czcOBAMRzVW3E3CQmJnvE0lHO5XN3maHjmgBMXRj4+Prz33ns4nU7q6+uJiYnB7XZz8OBBmpqaxIet2+0mMDCQlJSU007a78v30Gq14txZVVXFTz/9RFFRET4+PgwaNAi32y0mziYkJBAfHy9el0ql6uLhkcvlovfCg9vtFoUfa2trsdvtHD16lPLycux2O+Hh4ZhMJoKCgujo6MDhcOB0OlGpVAQEBBAQEMChQ4fEhWlAQADDhw8XjcOwsDC0Wm2v5t+SkhJ+/PFHKisraW5uRq/XU1paSlxcHDqdDq1W26NnIy4ujry8PGQyGSqVCn9/f+rq6sS/m81mIyAgAJvNRmZmJitWrCA8PJyIiAicTicFBQVUVVVx5MiRXv19LmrjpKKigvXr13Pdddf1ulzWarVy9OjRLh0pV6xYQUREBCqVik2bNnHddddxySWXEB8fz9NPP31GrPkTk5hOZPv27aJXYs+ePUyZMoUDBw6IbtCxY8eyY8cOhg8fzhdffEFkZOQvvhbPDTl58mQmT56M2+1my5YtzJ07l5iYGJKSktiyZQs+Pj5iyKW6upqQkBD+85//MGPGjF8U8lKpVIwePZoXX3yRpUuXMnnyZBYuXMiDDz7InDlzTuo58fX17fHcDoeDffv2oVAouOSSSzAajbzzzjtehklsbCyTJ08mODiYV1999bS/g4TEb5GOjg4UCgUHDx4kNTVVTL48GU1NTWLSrOde9FT+OZ1O0ftaX19PY2MjTqeTjIwM/Pz8fvH1dudltdvtojfHZDIhCALff/89Pj4+DBkyhOrqanbt2kVAQADt7e1cfvnlXt6hvuCpWILOxRNAeno6JpMJk8nE3r17qampEY2F+Ph4cZvD4RC9E4MGDWLMmDGEh4eftp5KTEwMZrOZoKAgampqMBqNBAUFodVqsVgsJ/Usn+h1ksvlXkaMTCbDz8+PxsZGQkJCqKiooKamhvj4eOrr62lqagI6vTy9lffo0xP3ueeeY8SIEfj5+REWFsZVV11FQUGB1z5Wq5W77rpLzMC++uqrqaur89qnvLycyy67DB8fH8LCwli0aNFpuds9SaGjR4/m9ttv57///S/l5eU9Wn5ut5tPP/2U4uLiLsbMtGnTuOWWW7jllltYvHgxr776Kvfffz/Lly/n22+/7fO1dUdERATx8fFiXgt0Zr5/9dVX4muz2SzW3kOny/Spp55Co9Fw3XXXnRHDpDvkcjnjx49n+vTpXH/99fzvf/9j9+7d5OTksH//flavXo2fnx/jx4/n8ssvPyO5OPHx8chkMmw2G6tWreIPf/iDODlA5w3cV32WsrIy6urqiI+PZ9q0aXz//fdiPFWhUKBWqzGbzbS0tHD99dd7ffZcjl0Jid7S3bzrUTX1cC7HrifB87vvvuPtt99m8eLFbN++ncbGxm69k558Oc/97nnIqdVqWlpaGDNmDJdeeimZmZn4+fnhdrs5cuQI69at63MuXXdER0djs9kwmUxi+LygoIC2tjYcDofoCerfvz9DhgyhtLRUTPp0Op0EBASc0QRdD3q9XuyxFhcXR3p6Otdffz2hoaEMGjSIefPmMXToUMLDw9FqtUyePJmoqKhfJPSmVCrx8fGhublZrDayWq1YrVaUSiUOhwNfX99TGoUeJW5P92WlUkm/fv1oaWkRVbhlMhnt7e00NDTg7+/PjBkzuPvuu3stzdEnz8lPP/3EXXfdxYgRI3A6nTz++ONMmzaNvLw8MZno/vvv5/vvv2f58uX4+/tz9913M3fuXDFJyOVycdlllxEREcH27dupqalh4cKFqFQqnn322b5cjtioqaSkhJKSEv7zn/8QEhJCdnY2M2bMIDg4mJaWFoKDgzEYDGzdupWXX34Zi8VC//79vf7Iw4cPF/9fLpczf/58BEHg5ZdfPmN9WNRqNT4+PnzxxRcsWrSIlpYWbrzxxi7Zy55y3wceeAA/Pz/RkEpMTDzp8T1xP+hcqbS1tdHe3i5aqxEREV0aQZ2IXC7HYDCQlJSEQqEgKSlJ3BYZGUlgYGCPN4bVavUSBfJ8j+jo6B7bh6elpXmtatrb2zl69ChDhw6lsLCQRx99tNvP6fV6+vXrR3V1dY/fZcKECQiCwPPPP49KpSI+Pp4//OEPzJw5E7fbzeOPP85VV13l9ZlzOXYlJHpLd/PunDlzvPY5l2NXo9EgCAJRUVE0NzfT2NjI2rVr2bBhA8HBweh0OiwWC76+vmi1WmpqasTqErfbLQqryWQyrr32WjFsHhoayhVXXIHVamXlypWoVKozEorVaDSivlVpaSlut5uVK1ciCAJ6vZ60tDT2799PcXExhYWFREZG4u/vL2pmJSQk9KhN4nK5sNls1NfXA52VTK2trWICrkKhIDo6msTExB71lVQqFTExMWKI50SZCYvFIs5zP58LHQ4HlZWVHD9+3GveVSgUxMTEEBkZ2SWp1ZObUl9fj6+vL4MGDWLr1q34+vri4+OD3W7nkksu6TYZNjQ0FKVSidvtxu12U11d7ZVEfOzYMXx8fDCbzdhsNtLS0qisrGTy5MkMHToUvV6PIAgnlaA4kT4ZJ6tXr/Z6/dFHHxEWFkZ
OTg4TJkygtbWVDz/8kE8//ZTJkycDsHjxYgYMGMDOnTsZPXo0P/74I3l5eaxbt47w8HCGDBnC3//+dx555BGeeuqpPim6rl27lqVLl7J161aKi4tpa2ujoaGBb775hm+++UbMmVAoFMjlcjF0EhYWxsyZM095fJlMxuWXX96HX6h3lJWVsXr1apYuXcpPP/3U5Zy/+93vePPNN8Wqnvb29h7FgcxmM3l5eezcuZO8vDz27NmD2+2mrq6O5uZmMXlKJpMRFxfHDTfcwJ///OdujRS3201zc7OYSHUiVqu12xLiyspKPvroI7777juxwZ4HjUZDbGwsY8eO5e6772bAgAFeN5hGo+lywwmCQHNzs7ia6A5P0mxPyGQy5s6dy4EDBygpKeHKK6/kk08+8fL2PPjgg0yZMkV8fa7HroREb+lp3vVwrsfuwIEDOXr0qBgO0ev1Ygnq+PHjEQQBf39/bDYbTqdTDGvk5+eTkpLi9aD/eT6fTCZDp9Nx9dVXIwjCGZODl8lkhIaGsnHjRpqamsQKovj4eA4fPozL5cLHx4fLLrtMXDQdPXqUwMBA0UDySB148jQOHz5MVVUVbW1t4gP3xGR9z9wmCAKhoaFMnz69WwVam81GRUUFOp2uiwS+xWIRk0w9ybCCIHDw4EG2bt1KR0eHKMlgNBoJDg6msbGRvXv3olQqGT58OGPGjPGa+zzH97Q8EQRB9IKo1eoevUS+vr5e38lgMJCdnc3333+PIAg4HA5iY2OpqakhMjKS0tJSL2+/IAhs27atyzOvJ35RzolHqdPzEM3JycHhcHDppZeK+/Tv35+4uDh27NjB6NGj2bFjB4MGDfJKJJo+fTp33nknR44c8aq28WCz2cQyUEB8SKampvLaa69htVppbGxk48aNbN26laKiImpqasjKyqKpqYnm5mZR+0Ov1/PII48wceLEX/LVfxEWi4UFCxZ0qdDxGCbvvvuul/Gg0WjQ6/VinsgLL7zAsWPHGD58OMuWLWPXrl3dVvv8nKNHj/Lkk0+ybt06XnjhBXHweqiurmbfvn08+OCDXT6rUqnQarXU1tZSWFhIZWUlx44d49VXX6WyspLAwEDsdjuRkZGUlZURGhrK22+/zV133cU777zDV199xRNPPMGtt9560gRjT87IggULTvpdTlbmFhERQVZWFm+88QZRUVG8+uqrXjdnY2Njl+94rseuhMTp8nOF5HM9dsePH8/EiRNpaGigpqaGgoICcSXuqWBRKpUoFAoUCgVWqxWTycTgwYOZOnVqr0K1Z7pip729HZvNRlNTk6hJYrVaKSwsxOFw4OfnxzXXXOO1CNRoNCQlJVFVVYXJZGLTpk0EBARgNBopLy8nKSmJUaNGsXPnToYOHYparRZl2g8ePCh6qg8fPixWKBYWFqLVapk4caL4HYuLi4mJicHhcHQxxjzzlkajYd26dQQGBtLY2Ijb7SYqKoqjR4/idDppbm7Gx8eHuro60XMyceJEtm7dyooVK5g+ffpJF3RNTU1kZmbS3t7eY3GF5+/pCbV5FsEymUxclCYmJortRSZNmuRlmBw+fJhNmzb12uA8bePE7XZz3333MXbsWDFRqLa2VlT9O5Hw8HAxwdEjE/zz7Z5t3fHcc8/xt7/9rcdr0Wq1xMTEsGDBAhYsWCBmO3syjz3/ALGq5HzTnTGRmZnJm2++2a1Xw9/fX/ydx4wZg8vl4ttvv0WlUjFu3DgEQaC4uJiKiopu47Se7Oq0tDSCgoIoKiryuhHdbjcvvfQSISEhDBo0iNbWVlEDADrLcvv160d+fr4oZfz2229TUFBAeHg4er1elE6GzrLuSZMmcc0114hiat9++y1ZWVmi6FtkZCQGg6Fb0bhT6RxkZ2fz8ccfd7ttyJAhhISEcOTIEaZMmUJ0dLS4rby8XPSqnMj5GrsSEn3BM++OHj2anTt3Audn7Or1evR6PYmJiYwaNQqbzYbRaMRsNhMREYHFYhFzGaAzpB0bG3ta0u1nAk8Pmbi4OCIiIqisrBS3yWQyZs2a1a13uq2tDYPBIErvexTGS0pKyMvL48iRI5jNZlER98Tz5efn43K5sFqtaDQaVCqVmEzs8cZ4Gg/a7XauuuoqcnNzkcvlpKWliSGhLVu2iA38PNVDw4YNY/fu3ajValFMLTAwkLKyMoYPH87x48f59NNPEQSBlJQU9u3bJ3qKQ0NDRS+5pzBEEAT69+9PeXl5j8aDwWDA39+f+vp6FAoFHR0d5OTkIAgCcrkcmUxGQEAAZrMZmUxGWlqa+Nn9+/ezZcsW8ffoDac9Uu666y5yc3PZunXr6R6i1zz22GM88MAD4uu2traT9kBQqVSixalUKs/bDfFzrFarKN3rESPKzs7mb3/7GzKZjD//+c8nlWb3WNoTJkwQcypOxGg0kpOT0207a0+tfHBwMEqlssvqJTc3l6VLl/KXv/wFjUbD1q1bKS0t5bbbbhMHnlwup6mpieLiYmbNmsWQIUP45ptv2L9/PwAJCQmUlpYSHx/P73//e4KCgnjjjTe8znPieUNDQ7sYJx6FyRMHdnfExMR0q1sil8v53e9+R0tLC3l5efzhD38Qz+l0OnniiSfIyck56bHPJH0duxISJ8Mz765atYr09PSzeq7ejl2FQoGPj49YkQKcNLftXOOZD2NjYzl27JhomOh0OpxOJ6GhoV59Z07E8+BVKBRkZ2d7KaN6qK2tJS8vTyxiOBFfX19iY2PF3LugoKAujQfdbjd+fn4EBQWxfv16nE4nERER4r6CIIhz69y5czly5Ag7duzAYrHg4+NDVVWV2NOoX79+jB07luHDh3sZASdWpwYFBSEIgpf4XWNjY5e/YXcEBATQ0NCAIAhERkbidDpF9drY2FhsNhuBgYFERESISbWNjY2sX79elMTobQXWaT217777blauXMnmzZu9Ws5HRERgt9tpaWnxsuLr6urEJJ+IiAh2797tdTxPVvmJiUAnotFozrjWyPnCMzCjoqL46KOP2L59O9B5M0+dOrXHz3RXHfNzAyM4OJhp06b1+ZocDgdPPfUUYWFh/P73v0cul3e5FpvNJia0rVy5kpkzZxIREcEf//hHcR+n04nD4eiixNgTer2ecePGUVZWBnR6wMLCwrBYLKfsnzN27FjmzZvHsmXLvN5XKpUMHjyYoqIi7Ha7lzT/d99916O3RRq7Ehc6J867njYbII3dU+GZJ+12Ow6Hg0suuURsZWGz2ejXr1+Pei0/13JRKBRe+i3Q6YHKzMzs83XV1tZSUFCAw+Fg+vTpBAUFMXfuXFHeHzoNK5vNRk1NDdXV1VgsFgYPHszAgQNFY6i1tZXW1lax141CoUCj0XSRy/AQExMjykN4KpM8ia2nqsIcNmwYVVVVmM1mampqiI2NFQ240NBQampqCA0N9epv9+OPP3olwXpKo09FnwJ7giBw991389VXX7Fhw4Yu1SPDhw9HpVKxfv168b2CggLKy8tFNdYxY8Zw+PBhMbsZOhNbDQbDWV8JnG9UKpWYeDZv3jzCwsL4+uuvxXbePQ0MlU
rF0KFDvX6zM0ljYyO7du0SO4d2R2lpqVg2vnbt2m4HmFKpRKfT9dpTJZfLvdRyo6KicDgcvXL7BQYG8vvf/76LgeZpQrV9+3ZSUlJEoaLGxkb++Mc/9lh5JY1diQsVad79ZahUKrE81tfXV+wH09raSlNTU48GWHh4uNgb5mxQXl4uJhN7cug8hoWHkpISsZmixWLh+PHj4n6eHjsRERFiv5/e5HMYDAb0er3o8YLOufibb745ZXVU//79iYmJEfNMEhISxDlYoVBgNBqx2+2i06KgoICSkhJRpV0mk53SO+OhT8bJXXfdxdKlS/n000/x8/OjtrZW7KcAna6j2267jQceeICNGzeSk5PDLbfcwpgxYxg9ejTQqSeSnp7OggULOHjwIGvWrOH//u//uOuuu34VVvrJUCgUKJVK5HI548aNY9++faxcuRLoTCQ6mSs0KyuLNWvWnJXrqq2tpampiQULFvRoIEVHRzNs2DCgs9po1apVZ+TcJ670pk+fLibPeXoNnYzo6OguLsLo6Gji4uLYtm2blxT2woULu1WU9SCNXYkLle7m3RM1TKSxe3I8De2ioqIIDQ3l4MGDQOec61nxd4dHgsFms52VJqH19fW0tbXh6+tLVFRUt/skJiaKhkBgYCAHDx78xbpKHr0nT+6lj48PISEhaDSaLto43REWFuZVNeTxMPn7+1NeXk5ZWZloJOXk5OB0Or2qmLpLO+iOPhkn77zzDq2trVxyySVERkaK/z7//HNxn9dee43Zs2dz9dVXM2HCBCIiIvjyyy+9fpiVK1eiUCgYM2YMN954IwsXLuTpp5/uy6Vc1MjlcsLDw3n88cdFD8SIESNOKkE/dOhQNm7ceMarPQRB4LvvviM8PJwZM2b0uJ/BYOCJJ55Aq9XicDh45plnui05Pl1CQkL405/+xLZt27DZbF1EprojLCysS7MuvV7fbS5Kb4wpaexKXIh0N+/+PEdCGrunprW1lfb2dnJychg5ciRWqxWdTudVln0iMpkMf39/9Hp9r9qk9AWz2UxlZSVms5lBgwb16PFITk5mwIABHDp0iEGDBlFSUtJjt/a+otVqKS8vF/NnBg8eLOaTnAxPzopMJqOjo0P8f8/C1vN5u92O0WgUt3uMkxO7z5+MPod1uvt38803e33ht956C6PRSEdHB19++WWXmGZ8fDw//PADZrOZhoYGXn755QsmafVc0d7e7tVj4MSKku6Ii4sjPDyce++9t1dehd5SUFDA+++/z6uvvtqjroiH8ePHixnfR48e5d133/3FCo6RkZFkZmby8ccfk56eLrpQ//vf/57SwpbL5V3GzfDhwzGZTKIKr8Ph4O233+5xAjoRaexKXIh0N+f+vJxYGru9o76+HofDgVKppKWlxcuD0B0pKSmUlJTw7bffsn379l6v+k/F3r17KSsrIyEhgaysrJPum52djdPppKOjg/j4eLZu3drrB3xPBAYG4na7SUtLY9SoUTidTvLy8qivrxcrLnvC4402GAxMmzZN7HrscDjEXnRtbW0sXbqU5uZmscrI02G+t8nSZ779o8Qp8Qj8nCzM8HMUCgUvvfQSTU1NjBs3jmeffbbXiUU9YTKZePTRRxkyZEivEmnVajX3338/Wq0Wt9vN008/zV//+lexb8LpMHXqVLZs2cKMGTOQy+Wi7kpxcfEpb0B/f38GDx7s9Z5cLufw4cNUVFQgCAKffPIJ999//1nL15GQkLg4cLvdlJaWolAo2Lx5c6/mreDgYPHhvXnzZpYsWSJqi5wulZWV7Nixg/DwcEaPHn1KA9GTU5KXl0d8fDxqtZolS5aQm5vb67LcnzN79mymTZvGFVdcQVhYGHq9XhSm88j590R0dLQounfi71BYWEhqaiput5uvvvpK9PL/3HPSW8V1yTg5h8hkMoKDg0lPT6e2trbPAzwkJISPP/6Yu+66i88++4ypU6fy3//+97RuFLfbzV/+8hcOHjzIK6+80uvyrvHjxzNp0iT8/Py4++67+eKLLxg3bhx/+9vfTtsAOPHcHg+HxWIR23n3RE9VTE1NTbjdbvbu3ctTTz3VbYmfhITEbwNPTk1hYaH4EPZ0Re4NycnJzJs3j8DAQCwWC3a7nQ0bNpxWWLu1tZVvv/0WHx8fsrKySEhIOOVnZDIZ2dnZuN1u9uzZQ1VVFR0dHXz11Vd8/fXXHDt2rM/XodPpxColpVKJWq0WvRunWhR6QlAnGhqeXJLS0lKUSiXt7e20trZ2GyKSjJMLEIVCwXXXXcdNN9102rLM/v7+PPbYY2zfvp3XXnuN1NTU01JTbGpq4vPPP+fyyy/vsca/O9RqNTNmzMBqtTJt2jTWrl3L/PnzaWxspKqqqs/X0RMBAQG9CsV4StZOxOOmbW5uPuOxYgkJiYuL0NBQBgwYQFxcnFhF0tck4MTERBYuXMjMmTNpbGxErVafNEewJ4qKirBarTidTrHBaW8IDw/HYDAQHh5OYmIi48ePx2Aw4Ovr+4s81ycyduxYgoKCxArHntBoNGi1WgRB8MqBVCgU1NfXExISctLnW29Dib+tgOMFwA033ADA66+/7vX+0aNHvfoxnAo/Pz/GjRt32texbds2TCbTaR1j1KhRCILA66+/zhdffHFWkupOVPU9Gd2VQf7aqw8kJCR6j1wuZ+zYsdjtdsrLy2lsbESj0aBQKLBYLJhMpl7lQeh0OtLS0k4pENkTTqeTsrIytFotPj4+J23B0d13iIqKQi6XU1NTQ3R0NHfeeSdut/uM9fQymUw4HI5TejY8Ym3t7e2iYSSTyVAoFNjtdlwuFyqVCrvd7tWLx7PfgAEDenU9kufkHCOXy5HL5QwbNszL4+FRjj0XFBYWct9995GWlsb06dP7/PmUlBSGDx/Ojz/+yIcffnhGr83T+dhkMnUray8hISHRV/R6PUFBQaIMfH19vVii+0tLc3vLnj17aGlpQS6XiyXefWHAgAE0NjYyatQo1q5diyAIaLXaM9KHyNfXl927d9Pe3v6Lcxmrqqqw2Wxdck08rwcNGtSr40jGyXkiJibGy1pvbGw8I+3BT4XJZOLuu+8mJCSEjz76qEcVwZMRHBzMww8/jEwm47XXXhNbep8J/Pz8xOzuxsbGUwogdZdzEhMT0+scGgkJid8OHo0Oj2Kp0+k8JzlpZWVl7N69m7a2Nvr379+nULqH1NRUtFotNpsNtVrN4cOHz9j1aTQaHA4HLpeLqqoqysvLT2q0eVqgnOjd9vy2nqqcn1fpeF73NgwlGSfnidDQUK8H6LkwTKBT7yM3N5f33nuv1xZsd8ycOZPRo0dTXl7Ohg0bztj1VVdX43A4EASBL7/8kpkzZ7Ju3boe9+9OHyY5OblP8VwJCYnfBh6Njra2Nq/S7LOJIAhs2rSJ1tZWMjMzvToS9wW5XE52djZ79uwhLS2NQ4cOnbFrb21tJSAgALVajcPhICcnhw0bNvR4/Li4OGQymahYC53qsZ4+bCd6Sn7+urc6LZJxcp7w8/Pj2muvFV8XFBScsoTrl+JwOPjPf/5D//79u5Tg9
hWbzYZSqUQQhDMmCgSI1np4eDiPPPIIWq2WN954o8ebRKVSdSth//MOrRISEhJJSUkEBASgVCpJSUlBEIQzmsjfHR5FX6VSSVhYGC0tLadtVFgsFlpaWigpKaG5ufkX60x5cLlcjB49Gn9/f+Lj48nPz2f37t09PpMUCkUXz4lOp+uVcXJin52TISXEnieqq6vJzMxEp9OJ7cXPduzT5XJRXl5ORUUFl19+OQEBAUyYMIHs7GxSUlJ6naDlcrnYsmULu3btOuPXuHfvXgCuvvpqDAYDWq2WoqIiTCaTFKqRkJD4RRw7dgyj0YhGoyEtLY3CwsKzHtYxmUyo1Wr8/f353//+Jy6eEhISGDBgALGxsb2qYGlvb2fv3r2o1WrKy8vPSK6J5/o6OjpwuVy43W6Sk5PZtm0bWq0Wo9HYo7z/2UYyTs4TxcXFvPLKK6jVaiwWC2azmZKSkh57LJwpPOI5nj49n3/+OVqtlmHDhnH11VfTr18/IiMjiY2Npba2tttS3HXr1vHBBx+IaolnKltcEASqq6uRyWRkZWXhcrno6OigsrKS1tZWyTiRkJD4RVRWVhIeHk5jYyO5ubkolcqzLtAol8tpaWkhNjZWDJ90dHSwb98+cnJyCAwMxN/fn8DAQMLCwjAajd0KdDY0NNDe3k5wcDAWi+W05Sh+jt1ux+12097eLub8aTQagoKCeswP6W1V6S9BMk7OE0lJSVRXV4sy1J622GcTrVbLTTfdxKJFi3C5XMhkMrRaLRaLhe3bt7N9+3bg/5eKWSyWU1YRRUVFMXfu3DNyfe3t7ezfvx9/f3+GDBmC1Wo9ZbJtREQEMTExFBYWAnDo0KGz1kVUQkLi4iY4OJi8vDz8/f1RKBS4XK4z2iOsO2JiYoiKiqKgoIDg4GCam5sRBIGEhASxtNloNFJSUoLb7UYmk3XrFZHJZKSnp9PS0oLRaCQrK+uMtB+orq4mICCA2tpakpKSaGtrQ6fT9dgEFjrnXU+iK3Q+v/qieN4bpJyT80R0dDRjx471es9jHJxNbr/9dmbPni0O6u5in2azmcbGxlMaJhqNhpdffpmMjIwzcm2tra10dHSQlpZGVFQUTqeT5uZmYmJieswh0el0Xi24jUZjrxUIJSQkflukpqbicrnQarXU1NQQFBSE0Wg8bRn43qDRaJgxYwZqtRqj0YhWq8XX15eOjg4GDx5MSkoKiYmJBAYGAp0ClImJiV7//Pz8MBgMtLS0iEbEhAkTzsj1tbW1ib9HXFyc6JXxeGm6w9fX1yunxOVynfHfUDJOzhNyuZxp06YRFRUlShgXFBSc9bwTvV4vtl8fNWrUaR3Dk9j13HPPcc0115yxa1uxYgUNDQ2EhIR4rQhCQkK8DJCT0draesYUEyUkJH5dBAcHEx0dTUdHBx0dHVitVqxWa5dGimeauLg4rrnmGmJiYpDJZKKhsWfPHoqLiyktLaWlpQWFQkFbWxulpaVe/1pbW2lubqa6upro6GiuuuqqMxLWcblcHDhwgMzMTJxOpzjPemTsTyVOJ5fL0ev1yGSyX6yP8nOksM55JDIyEqvVKg6yoqIi7Hb7We8UqtfrmT9/PjNnzqS4uJjdu3fjcDg4fvw4BQUFJ/1sRkYG2dnZDB48mNjY2DMae/Rkhk+YMKHXyV5KpZL+/ftz4MABoFOW/2xXPUlISFycyGQysVxWoVCg1WpFwcfw8PCzeu64uDhuuOEGSktLKSgowGKx4Ovri81mw2w2n3QuNRgMREREkJCQwJAhQ3q9WDsVnp44ra2tqFQqgoKCeqVbFRAQgFarxel0iuGdMx3WkYyT80hGRgZ2u12MedbX11NeXk7//v3Pyfn1ej2ZmZlkZmaek/P1Bh8fH6ZMmdLr/eVyudgv40R601BLQkLit0d4eDgdHR2i1olcLqeqquqczLtyuZykpCSSkpLE9zwFESdDr9ef1bYcx44dIzExsdfeGF9fX1QqFVarlfb2dtHQ02g0XobWibL1nte99bBIYZ3zSHR0NOPHjxdf9yYB9deK2+3m6NGjxMXFnXbvihM5VwaehITExUVqairNzc3I5XJRm+N8zrs6nY7g4OCT/jtbhoknnNXY2EhKSsovOpaPjw86nU5SiP01oNVq+fe//811112HQqHA6XSyf//+831Z54Xdu3ezceNG5s2b10Vvpbi4uNcZ9RqNBo1GQ1lZ2dm4TAkJiYucmJgYZsyYgc1mE5NTa2trz5lK94XE7t27CQgIQKPReHmblUolISEh1NbWdvs5t9uNIAioVCqxqkcQBLHZ38lE2HrbMkUyTs4zMTExfPDBB0yYMAFBEM56WdvJMJlM1NfXd1vtcjYTTZ1OJy+++CIOh4M5c+Z02d7e3t5jJnhRUZHXYA8ODiYsLOysaxdISEhcvGRkZHDttddSXV1NRkaGqNl0rrHZbBQVFbF3794u12AymcjNzeXAgQM4HI4zfu76+nqKiorw8/MT+/Z4OFFmojt++OEHmpubCQ4OZsqUKbjdbgICAnplnPQ2X0bKObkA8PHx4ZprrmHjxo3n/Nx2u52SkhI+//xzvvzyS+rr61m4cCHjx48nJiaG+vp6vv32W9auXYvdbueOO+7g2muvPaM5Hd988w2rV69m4cKFXvkvCoUCnU5He3t7j9olmzdvJi0tTbwB1Go1Go1G6mgsISFxUjwSBedaesAjmV9QUMCRI0dE3ZPNmzcTGRmJn58fHR0dlJeX09HRgUwmY8uWLYwaNYphw4adkYIJt9vNhg0bSExMpLKykqlTp4rbVCoVLpcLpVLZrXquzWYTw2BtbW3id5LJZDidTrHbMyAWNpz4+ue90HpCMk4uEEaOHIlOpztnK363283hw4dZtGiR2C3T49Z88cUXeeWVV1AoFF1aij/66KO8//77PP3008ybNw+1Wi0OyNMpbWtvb+f5558nODiYxx9/3OsYSqVSFAfqaeVQXl7upRxrtVqxWCyMHTuWr7/+us/XIyEh8dtAoVAQHR1NfX09giCIYZ6zidlsZu3ateTl5eFwOAgKCsLPzw+VSoXNZqOyslLsOuxyufD398fpdGKxWNi7dy+FhYXMmDGDkJAQmpubcbvdPWqRnAxPbx673c6wYcNEjRXozIHxLAa7690jCAJhYWFs3rwZf39/NBoNMpkMjUaDv78/TU1NKBQKBEEQxT5PfF1RUdGra5TCOhcIqampREdHs2bNGtrb28/quRwOB2+//TaTJ09m7dq1tLa2dom3ulwu7HZ7F90VmUxGWVkZt9xyC/fddx9ms5klS5awaNGi09Jo+eabbzh06BBPPvkkcXFxXtt0Oh0JCQk4nc4eM7yPHj3qdd7a2lqqqqrIzs4+Y7L6EhISv06SkpIwm83nJE+toaGB5cuXi3mFQUFBhIaGis0Iw8PDiY2NJTMzk0mTJhEWFkZUVBQqlQqlUklwcDDl5eV8+umn1NTUsGnTJr7++us+pwK43W62bduGn58fbrebYcOGeW0PDAzEbDajUqm6TRS2WCz4+PiIXpGwsDCUSiV1dXWivIQnJ8XTCPDE
1yEhIb26TslzcoHg5+fHoEGD2LBhA0aj8az0kREEgT179vDmm2/yxRdfeIVKoqOjqa2txdfXl5SUFOLi4qipqRFdi+3t7WLYZMCAARw4cID33nsPf39/Ro8ezYcffsh1113HiBEjen09TqeTpUuXEhQUxGWXXdbjfm63u8fmXJ7mXZ74qCcpKyYmBn9//zNeey8hIfHrITIykvb2djFP7XQqBZ1OJ3a7vdtcCkEQ6OjoYPPmzRw5ckQM0wwZMoQjR45w9OhR1Gq1OBfL5XIEQfCSsY+JiaGlpYXDhw+j1+txOp0sX74cnU5HW1sb27dvZ/bs2b2+3vr6evEZM3jw4B5l6lUqVbclzj4+PjgcDjIyMmhtbRXzAV0uV68Mj96G0STPyQXEmDFjaGtr49ChQ6f1+S1btvDqq6928YKYTCaWLVvGXXfdxbRp0/j444+75HA0NTXhdrvx9/cnLS2N7OxsnnjiCbZt28a2bdu4+eabUSqV2Gw2UfDM7Xbz8ccfM2zYMN544w0SExP7dL1ms5n8/HwsFgv5+fk9el4cDkePOSQxMTEYDAZ8fX3FfZctW0ZMTMxJDR4JCQkJPz8/dDodOp2O48ePn9Yxtm/fztq1azGZTOJ7nvDFkiVLePvtt8nJycHpdBISEsKUKVPQ6XR0dHQQGhrKnDlzCAkJQavVkpqaSkxMDGlpaQwaNAhBEGhoaECr1aJSqUTRNqPRSEJCAjExMX2WTWhoaMDX1xeNRkNubm6PXmlfX1+v7+RBo9Fgt9txOBzY7Xb27NkDdGqlJCcnn7Lsubfhf8k4uYDIyspCpVJRWVl5Wp8/ePAgr7/+uvggb2ho4P3332fOnDnceOONvPPOO93KNMtkMqKiopg6dSpZWVlYrVaOHz/Ohg0buPnmm4GepfWrq6v5/vvvuemmm3rtrvs5ra2tXH755dxzzz1s374dQRAwmUwUFxcDnauPngyXfv36cfz4cS/XZm5uLjKZzEtDRkJCQuLnKJVKIiIiaGtrw2KxdJtjcSry8/M5dOgQx44dAzrnyg8//JD//ve/GI1GoqKiCA0NFb3hW7duZceOHUBniOSHH34Q+9jExMSQnJxM//79cblcGAwGlEolHR0dpKeni3O1QqHg+PHjzJ0797T0SWpra3G73VRWVvLBBx/w3XffYbPZqKioYMeOHWIIpicvh1qtJigoyCtp1hMCCg4O7rZKx+MJ6q1mixTWuYBISkpCr9ezefNm/vjHP/Zawt1DaGgoLpdLvMEefvhhPvroI699FAoFGo1GlC2WyWTMmzePf/3rX4SEhHhZtS6Xi6qqKqKiohg1ahSRkZFUVlaya9cucR9BEPj666/5/e9/3+csck8LcOj0orz77rt8+umnvPfee2RlZfGvf/3rlHksAwYMICoqyus9T3xz0qRJBAYGSpU7EhISPeIxTlpaWmhvb++xyWhPaDQaMYG1tbWVlStXEhgYSHJysljk4Ek+hf+vmqrX65kyZQqRkZFifpxKpRIFKceNG8fWrVtpbGykoaGBgoICcX5PSUmhrKyMxsZGIiMj+3S9nuaoLpcLtVpNR0cHBw8exGKxEBwcTGtr6ykVa4OCgrr18Mvlcvr3709VVRXgrQzrKSPureCdZJxcQMTGxjJz5kw2b95MQ0NDn3s9ZGdn8+yzzxIUFAQgWuoGg4GxY8cyYcIE0tLSGDhwIHa7nZycHGQyGVdccUW3N6RCoRCTVO+66y52795NU1OTaAx0dHRw6NAhduzYwebNm5k8eXKfrvfAgQNdPDltbW289957jBgxguXLl58yOTg8PJyHHnpIbBoInSsZo9FIQEAAer1eMk4kJCR6JCMjg927d6PRaCguLmb48OF9+vzw4cPZt28f/fr1E6tSBEHg+PHjuN1uDAYD0dHRREVF4XK5KCsrQyaTMW3aNOLj47s95siRIwFITk4Whc48PcPa2tpwOp2Eh4ezf//+PhkngiCI56+trUWj0aDVapHJZOTn5zNgwADa2tpQKpWUlpb2eJz09HTKy8vJycmhvb0dg8GA0WikqqoKPz8/0QA7sZQ4ODhYDAX1Bsk4uYCQy+WMGzeO5cuXU1VV1WfjJDY2lptuugno9ETs37+f6dOn89xzzzF48OAusb6MjIxeHzs5OZnk5GQA7r77bqDTs9LU1MTevXu9ekX0BpvNxrJly7p1G5pMJkJDQ4mMjMThcHQb9zyRwMBAL6+NzWbD5XIRHBxMRkZGr0vXJCQkfnv4+fnh6+uLw+Ggurq6z8bJoEGDGDBgAEqlkpKSEqKjo2lqamL48OEMHTpUDH+cDsOHDyczMxO73S4+1D3hl4qKCmJjY/t0vIaGBqqrq8XCAYPBQGNjo2hEhISEUFNTg1arxd/fv8cmqmq1mpiYGHJycnC5XKJR5nA4SEpKQqlUepUS+/r6olAoaGpq6n1T1z59M4mzzqWXXoqfnx95eXldSrz6glwu5x//+Ieon3I2UCgUhIWFMWvWrD5/trS0lNWrV3e77ejRoxQXFyOXy39Rh2GZTHbWOzxLSEhc3CgUCtLS0ti0aRM+Pj5iCKIveOYZrVZLSEgI48aN67PhcLJjK5VKr2qg8PBwsrKy+nysw4cPY7PZxJySn4umFRcXY7PZSE9Pp7Cw0Ev/pLf83PgwGAwoFArq6ur69LtKCbEXGNHR0SQmJrJs2bJeu7+6Q6vVMnHixDNumHis41/KW2+91aNHRKFQ9DnueyIWi6VXbb8lJCQkAOLi4vDx8aGlpaXHfjK9wVNYcKYME+j0ULe3t/9igU6TySRWWkKn51yr1WIwGMQwjK+vL2azGb1e3+teQzKZjICAAGQyWZcWJ55KSk+jxb4YJ9Ky8gJDp9MxYsQIli5dSmFhYZ9CL6eLp3NkbW0tBw4cEOvW5XI5gwcPRqvVYjQa2bx5MwcPHiQ0NJRnnnmmz6sLD83NzaxatarH7RaLhdLSUgYOHMjevXtPebyWlhav3jsOh+OsC9lJSEj8evAIiQUFBZGbm9vnJNPe4slH8XTndTgclJeXU1JSIi5G5XI58fHxqFQqTCYTx48fF4Xibr755i6NUXvL8ePHxXkxMTGRgoICbDYbERERJCcnc/z4caxWK1qttlcGWnt7uxiy8fX1RSaTdenF4+fnJ1ZSeqp3eqtzIhknFyBXXXUVH374IWvWrDkrxokgCKxYsYKioiJsNhvbt2/HbDZz9OhRURLZg5+fHwqFAofDIWZZP/LII7/o/KtXrz6ppoDdbqexsbFLFU5P7N+/36tW3+Fw9NikUKvVYrVaUSqVuFyu32QnUgkJCW/0ej0xMTFiI75Jkyad8ZCw2Wxm8+bNFBYWit6QE0ttT8STjCqXywkMDBRzYvpawelBEARycnKAzqrQ5uZm8XgNDQ34+PiI4moqlapbyYmfH6+0tFS8Hk/DvxMXhZ5WAFartdt+O6dCMk4uQNLT0wkKCuK7777jnnvuOeMy7G63m9zcXF599dVTehh+vj0jI4M//elPp+018YiknagnkJKSwpAhQ3C5XOzcuZOamhrxXB5r+2Q
UFBR02cejkeIhMDCQp59+mjFjxpCbmyv2hnj++edP63tISEj8uoiLixPbYdTW1hITE3NGj2+328VS5aioKKqqqtBqtRQVFeHj44PZbBaVYX19fVEqlbS2thIZGSn21OltR9+fU19fT3V1NXK5HIfDIS7mPEUSFRUVJCQkYLfbCQoKorGx8aThe0/ZtNvtpqamRuzDZjKZxDyWwMBAGhoaCAkJISEhgerqavz8/MT5/VRIxskFSHBwMGFhYRw/fpy2trbTFjfrCYVCwZNPPsnkyZN54403xNLlkyGTycjOzuaDDz7o0gOnL+zatYu1a9cCEBAQwI033sjjjz9OZGSkaI0vX76cAQMG0NTU1CvjpDs3Yb9+/YDOkrzvvvuO4cOHk5WVhcvlYsCAAbjd7tNK9pKQkPh1EhISgslkws/Pj4aGhjNunAQEBDBz5kw2bNjAoUOHMJvNGAwG5HI5crmc6Ohorx40HmPl6NGjTJs2jaFDh572uXfu3IlMJmPQoEEcPHgQPz8/Jk+eTGJiIoIgkJ+fT15eHv369aO2tpaKiopee5U9Hhi5XI7BYBCbGQqCgNVqJS4uDqfTSVhYWJ9yFiXj5ALC7XZz7NgxPv/8c4qKihg+fDgGg+GsnEsmkzFhwgTGjRvHsWPH+PLLL9m0aRMbNmzwGjwymYzExETmzp3L//3f/+Hv73/a5xQEgf/973+i2M/ixYu57LLLRDef51wPP/wwAG+++Wav4pMnSxzOzs5GpVKxfv16Nm3a5LXtdBoVSkhI/HoQBAGj0Uhubi6HDh0iJiaGioqKM74g9KDX67niiitobGzk8OHDbNiwgba2NgIDA2lra0OlUuFwOETDwN/fn9mzZ5OcnHza3mqTycSxY8fQarWUl5czZMgQpkyZ4tW/LTs7m1GjRuF2u/nggw/w9/fvkj9yIp4y4e5CUiqViujoaIqKiggKCqKkpMRre2/zASXj5AKgsrJS7DD5008/ieWzl19++VnvrCuXy2lubmbz5s20trZ6xQOVSiX33HMPDz/8MBEREb/4XDU1NaxYsQKVSsUjjzzC5Zdf3u1+FouFbdu28d577wGdN+jJcm9OdhOFhoaKOTOSMSIhIeFpj5Gfn8+RI0eoqanBbrcTGRlJWVmZKGl/NgkJCUEQBAIDA8VOvQEBARQXFyMIAkFBQYwbN45+/fqddgKsh6NHj6LT6TCbzfj6+jJjxgwxH8SD1WqlsrKS3NxcmpqaGDJkCPv37+9RJM5jnHj+/+dzqydHJj4+nujoaHG/0tJSSb7+QsdisbBnzx4+/PBDNm7cSFVVlZeXIDY2lquuuuqcXEt0dDSDBg3ihx9+ICwsTHx/1KhRPPPMM2esHPmDDz6gurqahx9+mHvvvddrm91u59ixYyxZsoTVq1dTXFwslhpnZGT0WeTN0zvinXfe6dLkUEJC4reHpx3HgQMHOHLkCHa7HUEQUCqVYuO9pqYmJkyY0GOn3jNJVFQUR44cISQkBJfLRVFREdApp3/55ZefkbCS3W5nx44dZGZmUlBQwNy5c1Gr1WKn97q6OoqKijh8+LCYBBsUFERFRQVut5vU1NRTnkOlUolNYS0WC01NTeTn5wOdpcQeWQiHw0FJSUmvve+ScXIOcTqdFBcX880337Bq1Sp27drV7ao/PDyc559/XsybONvExMTwwgsv8Ne//tUrUdUjbXwmyM/P59133wU6OyC/+OKL4jaj0cj27dvJy8vr4vKTyWTMmjXrpJNFd4PdYrGQl5fHp59+KlXkSEj8hqmvr+fIkSPk5ubS2toqrvJlMhkajYasrCyOHDlCeXk5qamponT82aZ///7Ex8dTWVnp9RwICgo6Y/kuO3fuxGazkZOTQ1tbG0uWLBG3uVwuOjo6xNCMr68vkZGR+Pv7k5eXJ+Y+dodcLvfy6nsW1i6Xiz179mA2m4mOjiYnJ8dr/pXJZL0W1pSMk3OAzWZj9erVfPTRR2zevNmrg+6JyGQyBg4cyD//+c8+96k5E/xS92FPmM1mHn30UTFL+/333+/V5xQKBQsWLOAPf/jDSfcbNWpUl8RZQRCorq6mvb2d2NhY5s2bx5AhQ8RtO3bs4NNPP5X0UCQkfoW4XC6qq6v56aefKC8vx+VyodVqxW66Wq2W1NRUiouLxQ7BGRkZzJw586yH0k9Ep9P1yjtxOlRWVrJt2zbsdrvoLTlRckEmk6FWq1Gr1YSGhmK326mpqaGkpERMmO0pBKNUKgkNDaWlpQWDweDVv6yhoYGEhASOHz+OXq8XK4xsNhvl5eVSQuyFgNlsZsWKFXzwwQfs2LFD/KOoVCrS09OpqanxUv274ooreO+99/rcU+dCxuFw8PLLL7Ny5cpef8bHx4cBAwZw7733Mm/evFOGlbpLFCspKWH27Nm88MIL3HDDDVitVr7++mtMJpOYjCbloEhI/LrwGCXr1q2joqICq9WKQqHAz88Pl8uF1WrFbDajVqsRBIHKykp8fHyYMWOGKF3wa6Curo6vvvoKh8NBeHg4GRkZHDhwAL1ej8lkwmAwUFtbi0KhwG63c/z4cWQyGTKZjNjYWGbNmtXr55CnpBg6m8EOGDCA48ePc+mll2IymSgoKBBz/lQqlaRzcj5pb2/n66+/ZsmSJWzcuBGXy4VOpyMzM5NRo0ZxxRVXkJ2dzZ/+9Cc+/vhjoNPF9/bbb/9qDBOHw8GOHTt4/fXXWbVqVZeqmxNbaSsUCoKCgsjKymLmzJlMnDiRxMREr2zyk+FZCZ3oGs3NzSU+Pl7sWPznP/9ZkrSXkPgVU1paKubvCYKARqMRdUM8lYCpqamsXr0avV5PeXk5CoWCSy+9lEGDBonHOZ3eOhcK9fX1bN++nfz8fIKCghgwYAA1NTVs2rRJrExyu900NDSI2iRqtZqwsDCSkpJITk4mKSmpS5PY7vCUCwuCgF6vx2w2Y7VaSU1NZfTo0axevZr8/Hwx50+pVDJ27Fgv783JkIyTM4jFYmH37t384x//EI0StVrN0KFDef7558nOzha9AIcPHxb1PnQ6Hc8991wXRdQffviB8ePH9/ohfaFw8OBBnnrqKdasWSMaDFqtFpVKhcViwel0MmzYMK655hrsdjvjx48nKSmJ8PDw03KpJiQkEBAQ4GWcxMbGolAoyMvL4/7775cMEwmJXymlpaXs37+f/Px8sYu5Xq/HbreTmZlJZmYmERERaLVa1qxZg9PpZPTo0WzdupXo6GgyMzOx2WxoNBqsVisbNmxg8ODBZ1zn5GzidDrZvn07W7duRa1Wk5KSQnh4ODt37sRisSAIgij0VldXR2BgIAMGDCAuLo6EhATCw8N7ZZCcSFRUFHv27MFgMJCamsq+fftQqVTo9XpycnI4cuQIcrkcPz8/0XNVUFAgKo2fCsk4OQN4MqKfeOIJ9uzZg8ViQafTMW3aNO69916ysrK8DIy6ujoWLFhAbW0tcrmcxx57jNmzZ9Pa2srnn3/O7bffjkwmY/HixVRVVXH77befx2/Xe9xuN8uWLePxxx8XS/KmT5/O1q1biYmJ4aqrriIqKo
+[... remainder of the base64-encoded PNG image data omitted (rendered preview of the extracted contours) ...]\n",
+      "text/plain": [
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import draw_tools\n", + "image = numpy.array(PIL.Image.open(io.BytesIO(recorder.image.value)))[..., :3]\n", + "# 生成风格图像\n", + "im = draw_tools.generate_style_image(image)\n", + "# 获取轮廓列表\n", + "contour_list = draw_tools.getContourList(im, pen_width = 3, min_contour_len = 30, is_show=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# 对轮廓列表进行排序\n", + "contour_list = draw_tools.sortContoursList(contour_list)\n", + "# 平滑拟合并采样轮廓\n", + "f_contour_list = draw_tools.sample_and_smooth_contours(im, contour_list, is_show=False)\n", + "# 保存轮廓点到文件中,每个轮廓占一行,x和y坐标用逗号分割,点之间用逗号分割\n", + "draw_tools.save_contour_points(f_contour_list, \"../data/contour_data.txt\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import shutil\n", + "shutil.copy( \"../data/contour_data.txt\", \"/home/robot/Work/system/bspline.txt\")\n", + "import os\n", + "currdir = os.getcwd()\n", + "os.chdir('/home/ck/')" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "#1、执行./IGH_rc.sh,启动igh\n", + "#2、执行./runIGH.sh,开启通讯\n", + "#3、执行./runrobot.sh,运行画画程序\n", + "#4、执行./stoprobot.sh,关闭画画程序\n", + "#5、执行./runrobotoig.sh,运行运动程序,可在桌面程序上运动\n", + "#6、执行./stoprobotoig.sh,关闭运动程序\n", + "#7、执行./stopIGH.sh,关闭通讯" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting EtherCAT master 1.5.2 done\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#1、执行./IGH_rc.sh,启动igh\n", + "os.system(\"./IGH_rc.sh\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Failed to reserve master: Device or resource busy\n", + "root 38530 1 1 Dec06 ? 00:04:26 ./IgHEtherCATMaster --task run --file ./eni.xml --affinity 1\n", + "root 111117 110176 0 02:13 ? 00:00:00 sh -c ps -ef | grep Master\n", + "root 111119 111117 0 02:13 ? 00:00:00 grep Master\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import time\n", + "#2、执行./runIGH.sh,开启通讯\n", + "os.system(\"./runIGH.sh\")\n", + "time.sleep(5)\n", + "os.system(\"ps -ef | grep Master\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "============start robot control======================\n", + "start run time:20231207021353\n", + "verison: 11.4.6\n", + "Author: HanBing\n", + "Email: A994055925@163.com\n", + "Data file path: /home/ck/robot_config/data/\n", + "Start system initialize!\n", + "OUT SINGLE ABSOLUTE ENCOUDER(POSITION CLOSED LOOP)\n", + "group init existing problem. 
Some fields are not found!\n", + "dof num is 6\n", + "control:6,state:6,mode:0\n", + "ac_position:6,ac_velocity:0,ac_torque:0,ac_position2:0,ac_velocity2:0,ac_sensor_torque:0,ac_mode:0,ErrorCode:0,FollowingErrorActualValue:0\n", + "position:6,velocity:0,torque:0,VelocityOffset:0,TorqueOffset:0,TorqueMaxLimit:0,TorqueMinLimit:0\n", + "/////////////////////////////////////////////////////////////////\n", + "fd_ecat_in_name: /ecat_in\n", + "fd_ecat_out_name: /ecat_out\n", + "/////////////////////////////////////////////////////////////////\n", + "state: Status Word\n", + "control: Control word\n", + "mode: \n", + "ac_position: Position actual value\n", + "ac_velocity: \n", + "ac_torque: \n", + "ac_position2: \n", + "ac_velocity2: \n", + "ac_sensor_torque: \n", + "ac_mode: \n", + "ErrorCode: \n", + "FollowingErrorActualValue: \n", + "position: Target Position\n", + "velocity: \n", + "torque: \n", + "VelocityOffset: \n", + "TorqueOffset: \n", + "TorqueMaxLimit: \n", + "TorqueMinLimit: \n", + "ec_di: \n", + "ec_do: \n", + "ec_ai: \n", + "ec_ao: \n", + "ec_li: \n", + "ec_lo: \n", + "Number of fields: 26\n", + "busyTs: 4000000 ns\n", + "/////////////////////////////////////////////////////////////////\n", + "ec_device1/robot0: \n", + "DOF: 6\n", + "0_state: 64\n", + "0_control: 64\n", + "0_mode: -999999\n", + "0_ac_position: 0\n", + "0_ac_velocity: -999999\n", + "0_ac_torque: -999999\n", + "0_ac_current: -999999\n", + "0_ac_position2: -999999\n", + "0_ac_velocity2: -999999\n", + "0_ac_sensor_torque: -999999\n", + "0_ac_mode: -999999\n", + "0_ErrorCode: -999999\n", + "0_FollowingErrorActualValue: -999999\n", + "0_position: 0\n", + "0_velocity: -999999\n", + "0_torque: -999999\n", + "0_VelocityOffset: -999999\n", + "0_TorqueOffset: -999999\n", + "0_TorqueMaxLimit: -999999\n", + "0_TorqueMinLimit: -999999\n", + "1_state: 144\n", + "1_control: 144\n", + "1_mode: -999999\n", + "1_ac_position: 80\n", + "1_ac_velocity: -999999\n", + "1_ac_torque: -999999\n", + "1_ac_current: -999999\n", + "1_ac_position2: -999999\n", + "1_ac_velocity2: -999999\n", + "1_ac_sensor_torque: -999999\n", + "1_ac_mode: -999999\n", + "1_ErrorCode: -999999\n", + "1_FollowingErrorActualValue: -999999\n", + "1_position: 80\n", + "1_velocity: -999999\n", + "1_torque: -999999\n", + "1_VelocityOffset: -999999\n", + "1_TorqueOffset: -999999\n", + "1_TorqueMaxLimit: -999999\n", + "1_TorqueMinLimit: -999999\n", + "2_state: 224\n", + "2_control: 224\n", + "2_mode: -999999\n", + "2_ac_position: 160\n", + "2_ac_velocity: -999999\n", + "2_ac_torque: -999999\n", + "2_ac_current: -999999\n", + "2_ac_position2: -999999\n", + "2_ac_velocity2: -999999\n", + "2_ac_sensor_torque: -999999\n", + "2_ac_mode: -999999\n", + "2_ErrorCode: -999999\n", + "2_FollowingErrorActualValue: -999999\n", + "2_position: 160\n", + "2_velocity: -999999\n", + "2_torque: -999999\n", + "2_VelocityOffset: -999999\n", + "2_TorqueOffset: -999999\n", + "2_TorqueMaxLimit: -999999\n", + "2_TorqueMinLimit: -999999\n", + "3_state: 304\n", + "3_control: 304\n", + "3_mode: -999999\n", + "3_ac_position: 240\n", + "3_ac_velocity: -999999\n", + "3_ac_torque: -999999\n", + "3_ac_current: -999999\n", + "3_ac_position2: -999999\n", + "3_ac_velocity2: -999999\n", + "3_ac_sensor_torque: -999999\n", + "3_ac_mode: -999999\n", + "3_ErrorCode: -999999\n", + "3_FollowingErrorActualValue: -999999\n", + "3_position: 240\n", + "3_velocity: -999999\n", + "3_torque: -999999\n", + "3_VelocityOffset: -999999\n", + "3_TorqueOffset: -999999\n", + "3_TorqueMaxLimit: -999999\n", + "3_TorqueMinLimit: 
-999999\n", + "4_state: 384\n", + "4_control: 384\n", + "4_mode: -999999\n", + "4_ac_position: 320\n", + "4_ac_velocity: -999999\n", + "4_ac_torque: -999999\n", + "4_ac_current: -999999\n", + "4_ac_position2: -999999\n", + "4_ac_velocity2: -999999\n", + "4_ac_sensor_torque: -999999\n", + "4_ac_mode: -999999\n", + "4_ErrorCode: -999999\n", + "4_FollowingErrorActualValue: -999999\n", + "4_position: 320\n", + "4_velocity: -999999\n", + "4_torque: -999999\n", + "4_VelocityOffset: -999999\n", + "4_TorqueOffset: -999999\n", + "4_TorqueMaxLimit: -999999\n", + "4_TorqueMinLimit: -999999\n", + "5_state: 464\n", + "5_control: 464\n", + "5_mode: -999999\n", + "5_ac_position: 400\n", + "5_ac_velocity: -999999\n", + "5_ac_torque: -999999\n", + "5_ac_current: -999999\n", + "5_ac_position2: -999999\n", + "5_ac_velocity2: -999999\n", + "5_ac_sensor_torque: -999999\n", + "5_ac_mode: -999999\n", + "5_ErrorCode: -999999\n", + "5_FollowingErrorActualValue: -999999\n", + "5_position: 400\n", + "5_velocity: -999999\n", + "5_torque: -999999\n", + "5_VelocityOffset: -999999\n", + "5_TorqueOffset: -999999\n", + "5_TorqueMaxLimit: -999999\n", + "5_TorqueMinLimit: -999999\n", + "/////////////////////////////////////////////////////////////////\n", + "/////////////////////////////////////////////////////////////////\n", + "/////////////////////////////////////////////////////////////////" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/dev/mem: Bad address\n", + "/dev/mem: Bad address\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Device is running\n", + "InitRobot\n", + "Hardware Match\n", + "printf_hardware_information: can't get cpuid\n", + "Hardware information: \n", + " cpuid:0000000000000000\n", + " mac:128a5e84678d\n", + "init_robot_teach\n", + "set_robot_index\n", + "get_robot_num\n", + "set_addition_index\n", + "get_addition_num\n", + "get_robot_num\n", + "initialize finish\n", + "start Draw\n", + "power is finish!\n", + "150\n", + "1\n", + "2\n", + "3\n", + "4\n", + "5\n", + "6\n", + "7\n", + "8\n", + "9\n", + "10\n", + "11\n", + "12\n", + "13\n", + "14\n", + "15\n", + "16\n", + "17\n", + "18\n", + "19\n", + "20\n", + "21\n", + "22\n", + "23\n", + "24\n", + "25\n", + "26\n", + "27\n", + "28\n", + "29\n", + "30\n", + "31\n", + "32\n", + "33\n", + "34\n", + "35\n", + "36\n", + "37\n", + "38\n", + "39\n", + "40\n", + "41\n", + "42\n", + "43\n", + "44\n", + "45\n", + "46\n", + "47\n", + "48\n", + "49\n", + "50\n", + "51\n", + "52\n", + "53\n", + "54\n", + "55\n", + "56\n", + "57\n", + "58\n", + "59\n", + "60\n", + "61\n", + "62\n", + "63\n", + "64\n", + "65\n", + "66\n", + "67\n", + "68\n", + "69\n", + "70\n", + "71\n", + "72\n", + "73\n", + "74\n", + "75\n", + "76\n", + "77\n", + "78\n", + "79\n", + "80\n", + "81\n", + "82\n", + "83\n", + "84\n", + "85\n", + "86\n", + "87\n", + "88\n", + "89\n", + "90\n", + "91\n", + "92\n", + "93\n", + "94\n", + "95\n", + "96\n", + "97\n", + "98\n", + "99\n", + "100\n", + "101\n", + "102\n", + "103\n", + "104\n", + "105\n", + "106\n", + "107\n", + "108\n", + "109\n", + "110\n", + "111\n", + "112\n", + "113\n", + "114\n", + "115\n", + "116\n", + "117\n", + "118\n", + "119\n", + "120\n", + "121\n", + "122\n", + "123\n", + "124\n", + "125\n", + "126\n", + "127\n", + "128\n", + "129\n", + "130\n", + "131\n", + "132\n", + "133\n", + "134\n", + "135\n", + "136\n", + "137\n", + "138\n", + "139\n", + "140\n", + "141\n", + "142\n", + "143\n", + "144\n", + "145\n", + "146\n", + "147\n", + "148\n", + "149\n", + "150\n" + 
+     ]
+    }
+   ],
+   "source": [
+    "# Run the drawing\n",
+    "# 3. Run ./runrobot.sh to start the drawing program\n",
+    "# 4. Run ./stoprobot.sh to stop the drawing program\n",
+    "# 5. Run ./runrobotoig.sh to start the motion program (the robot can then be moved from the desktop program)\n",
+    "# 6. Run ./stoprobotoig.sh to stop the motion program\n",
+    "os.system(\"./runrobot.sh\")\n",
+    "# os.system(\"./stoprobot.sh\")\n",
+    "# os.system(\"./runrobotoig.sh\")\n",
+    "# os.system(\"./stoprobotoig.sh\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 4. Run ./stoprobot.sh to stop the drawing program\n",
+    "os.system(\"./stoprobot.sh\")\n",
+    "os.system(\"./stoprobot.sh\")\n",
+    "os.system(\"./stopIGH.sh\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/robot_painting/qmupd_vs/.vscode/launch.json b/robot_painting/qmupd_vs/.vscode/launch.json
new file mode 100644
index 0000000000000000000000000000000000000000..05c523d521c4e4d0ebf50c396eae31ccd52a6102
--- /dev/null
+++ b/robot_painting/qmupd_vs/.vscode/launch.json
@@ -0,0 +1,15 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python Debugger: Current File",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/robot_painting/qmupd_vs/LICENSE b/robot_painting/qmupd_vs/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/robot_painting/qmupd_vs/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/robot_painting/qmupd_vs/README_CN.md b/robot_painting/qmupd_vs/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..115c28022189ef5cb104ab2756b317d5323c9394
--- /dev/null
+++ b/robot_painting/qmupd_vs/README_CN.md
@@ -0,0 +1,155 @@
+# General Virtual Sketching Framework for Vector Line Art - SIGGRAPH 2021
+
+[[Paper]](https://esslab.jp/publications/HaoranSIGRAPH2021.pdf) | [[Project Page]](https://markmohr.github.io/virtual_sketching/)
+
+This code can be used for: **line drawing vectorization**, **rough sketch simplification** and **photograph-to-vector-sketch conversion**.
+
+*(teaser images omitted)*
+
+## Table of Contents
+- [Dependencies](#dependencies)
+- [Testing with pretrained models](#testing-with-pretrained-models)
+- [Retraining](#retraining)
+- [Citation](#citation)
+
+## Dependencies
+ - [Tensorflow](https://www.tensorflow.org/) (1.12.0 <= version <= 1.15.0)
+ - [opencv](https://opencv.org/) == 3.4.2
+ - [pillow](https://pillow.readthedocs.io/en/latest/index.html) == 6.2.0
+ - [scipy](https://www.scipy.org/) == 1.5.2
+ - [gizeh](https://github.com/Zulko/gizeh) == 0.1.11
+
+## Testing with pretrained models
+### Model download and preparation
+
+Download the models [here](https://drive.google.com/drive/folders/1-hi2cl8joZ6oMOp4yvk_hObJGAK6ELHB?usp=sharing):
+ - `pretrain_clean_line_drawings` (105 MB): for line drawing vectorization
+ - `pretrain_rough_sketches` (105 MB): for rough sketch simplification
+ - `pretrain_faces` (105 MB): for photograph-to-vector-sketch conversion
+
+Then, place the models as follows:
+```
+outputs/
+    snapshot/
+        pretrain_clean_line_drawings/
+        pretrain_rough_sketches/
+        pretrain_faces/
+```
+
+### How to test
+Choose an image from the `sample_inputs/` folder, and run one of the commands below depending on the task. The results can be found under the `outputs/sampling/` directory.
+
+``` python
+python3 test_vectorization.py --input muten.png
+
+python3 test_rough_sketch_simplification.py --input rocket.png
+
+python3 test_photograph_to_line.py --input 1390.png
+```
+
+**Note!!!** Our method starts drawing from a randomly picked initial position, so in principle each test run yields a different result (it may look good, but it may also look less good). It is therefore recommended to run the test several times and pick the best-looking result. You can also set the `--sample` argument to define how many outputs a single run produces:
+
+``` python
+python3 test_vectorization.py --input muten.png --sample 10
+
+python3 test_rough_sketch_simplification.py --input rocket.png --sample 10
+
+python3 test_photograph_to_line.py --input 1390.png --sample 10
+```
+
+**How to reproduce the results shown in the paper?** The results shown in the paper can be downloaded [here](https://drive.google.com/drive/folders/1-hi2cl8joZ6oMOp4yvk_hObJGAK6ELHB?usp=sharing). They are the best ones we picked from the different outputs of several test runs. Obviously, reproducing them exactly would require starting the drawing from the same initial positions.
+
+### Additional tools
+
+#### a) Visualization
+
+Our vector outputs are all stored in `npz` packages. Run the following command to obtain the rendered result together with the drawing order. The visualizations are placed in the same directory as the `npz` package.
+``` python
+python3 tools/visualize_drawing.py --file path/to/the/result.npz
+```
+
+#### b) GIF making
+
+To watch the dynamic drawing process, run the following command to obtain a `gif`. The result is placed in the same directory as the `npz` package.
+``` python
+python3 tools/gif_making.py --file path/to/the/result.npz
+```
+
+#### c) Conversion to SVG
+
+The vector results in an `npz` package are stored in the format of Equation (1) in the paper. Run the following command to convert them to the `svg` format. The result is placed in the same directory as the `npz` package.
+
+``` python
+python3 tools/svg_conversion.py --file path/to/the/result.npz
+```
+ - The conversion is implemented in two modes (set the `--svg_type` argument):
+   - `single` (default): each stroke (a single curve) forms one path in the SVG file
+   - `cluster`: each chained curve (several strokes) forms one path in the SVG file
+
+**Important notes**
+
+In the SVG format, all line segments on a single path share the same *stroke-width*. In our paper, however, the strokes on a chained curve are defined to have different widths, and a single stroke (a Bézier curve) is defined to have a width that linearly increases or decreases from one endpoint to the other.
+
+Therefore, neither of the two conversion modes above can, in principle, produce SVG results that are visually identical to the ones in the paper. (*If you use the converted SVG results for visual comparison with our paper, please mention this issue.*)
+
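+As a rough workaround, a stroke whose width varies linearly can be approximated by splitting it into short segments and giving each segment its own constant *stroke-width*. The sketch below only illustrates this idea and is not one of the repository's tools; it assumes the sampled stroke points and the two endpoint widths have already been extracted from the `npz` result:
+
+``` python
+import numpy as np
+
+def stroke_to_svg_paths(points, w0, w1, n_seg=8):
+    """Approximate one variable-width stroke with n_seg constant-width SVG paths.
+
+    points: (N, 2) array of sampled stroke coordinates (assumed given);
+    w0, w1: widths at the two endpoints, interpolated linearly in between.
+    """
+    idx = np.linspace(0, len(points) - 1, n_seg + 1).astype(int)
+    paths = []
+    for k in range(n_seg):
+        seg = points[idx[k]:idx[k + 1] + 1]          # consecutive segments share a point
+        width = w0 + (w1 - w0) * (k + 0.5) / n_seg   # width at the segment midpoint
+        d = "M " + " L ".join("%.2f %.2f" % (x, y) for x, y in seg)
+        paths.append('<path d="%s" fill="none" stroke="black" stroke-width="%.2f"/>' % (d, width))
+    return paths
+```
+
+The larger `n_seg` is, the closer the output gets to the linearly varying width defined in the paper, at the cost of a larger SVG file.
+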
+
+## Retraining
+
+### Training preparation
+
+Download the models [here](https://drive.google.com/drive/folders/1-hi2cl8joZ6oMOp4yvk_hObJGAK6ELHB?usp=sharing):
+ - `pretrain_neural_renderer` (40 MB): the pretrained neural renderer
+ - `pretrain_perceptual_model` (691 MB): the pretrained perceptual model, used to compute the raster loss
+
+Download the training datasets [here](https://drive.google.com/drive/folders/1-hi2cl8joZ6oMOp4yvk_hObJGAK6ELHB?usp=sharing):
+ - `QuickDraw-clean` (14 MB): for line drawing vectorization. Taken from the [QuickDraw](https://github.com/googlecreativelab/quickdraw-dataset) dataset.
+ - `QuickDraw-rough` (361 MB): for rough sketch simplification. Synthesized with the pencil-drawing generation method from [Sketch Simplification](https://github.com/bobbens/sketch_simplification#pencil-drawing-generation).
+ - `CelebAMask-faces` (370 MB): for photograph-to-vector-sketch conversion. Obtained by processing the [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ) dataset.
+
+Then, place the datasets as follows:
+```
+datasets/
+    QuickDraw-clean/
+    QuickDraw-rough/
+    CelebAMask-faces/
+outputs/
+    snapshot/
+        pretrain_neural_renderer/
+        pretrain_perceptual_model/
+```
+
+### How to train
+
+Training with multiple GPUs is recommended. We train each task with 2 GPUs (11 GB each).
+
+``` python
+python3 train_vectorization.py
+
+python3 train_rough_photograph.py --data rough
+
+python3 train_rough_photograph.py --data face
+```
+
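+Before launching a run, it may help to confirm that the layout above is in place. A minimal pre-flight check, using only the directory names listed in this section:
+
+``` python
+import os
+
+# Directory names exactly as described in the preparation steps above
+required = [
+    "datasets/QuickDraw-clean",
+    "datasets/QuickDraw-rough",
+    "datasets/CelebAMask-faces",
+    "outputs/snapshot/pretrain_neural_renderer",
+    "outputs/snapshot/pretrain_perceptual_model",
+]
+missing = [p for p in required if not os.path.isdir(p)]
+if missing:
+    raise SystemExit("Missing directories: " + ", ".join(missing))
+print("Directory layout looks complete.")
+```
+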
+
+## Citation
+
+If you use the code and models, please cite this work. Thank you!
+
+```
+@article{mo2021virtualsketching,
+  title = {General Virtual Sketching Framework for Vector Line Art},
+  author = {Mo, Haoran and Simo-Serra, Edgar and Gao, Chengying and Zou, Changqing and Wang, Ruomei},
+  journal = {ACM Transactions on Graphics (Proceedings of ACM SIGGRAPH 2021)},
+  year = {2021},
+  volume = {40},
+  number = {4},
+  pages = {51:1--51:14}
+}
+```
+
diff --git a/robot_painting/qmupd_vs/camera_tools.py b/robot_painting/qmupd_vs/camera_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..786ba68b877238de4231f8d980165462e30093a7
--- /dev/null
+++ b/robot_painting/qmupd_vs/camera_tools.py
@@ -0,0 +1,108 @@
+import time
+import socket  # required by SocketUtils below
+import struct  # required by SocketUtils below
+import cv2
+import tkinter as tk
+from PIL import Image, ImageTk
+
+class CameraApp:
+    def __init__(self):
+        # Build the UI
+        self.window = tk.Tk()  # create the main window
+        self.window.title("Photo capture")  # set the window title
+        self.window.geometry("1800x1300")  # set the window size
+
+        # Widget that displays the captured photo
+        self.photo_label = tk.Label(self.window, width=512, height=512)  # create a label widget
+        self.photo_label.pack()  # add the label to the window
+
+        # Create the capture button
+        self.take_photo_button = tk.Button(self.window, text="Take photo", command=self.take_photo, width=80, height=50, font=40)  # create a button widget
+        self.take_photo_button.pack()  # add the button to the window
+
+        # Open the camera
+        self.cap = cv2.VideoCapture(0)  # create a VideoCapture object on the default camera
+        # Set the frame width and height
+        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 512)
+        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 512)
+        _, self.frame = self.cap.read()  # read one frame from the camera
+        self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)  # convert the frame from BGR to RGB
+
+        self.image_flipped = True  # whether to mirror the photo
+        self.last_photo = None
+        self.last_photo_name = None
+        # Keep the UI updating
+        self.update_frame()
+
+        self.window.mainloop()  # enter the window message loop and wait for user input
+
+    def update_frame(self):
+        _, self.frame = self.cap.read()  # read a new camera frame
+
+        if self.image_flipped:
+            self.frame = cv2.flip(self.frame, 1)  # mirror the frame if requested
+
+        self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)  # convert the frame from BGR to RGB
+
+        # Convert the camera frame to a PIL image
+        pil_image = Image.fromarray(self.frame)
+
+        # Convert the PIL image to a format Tkinter can display
+        tk_image = ImageTk.PhotoImage(image=pil_image)
+
+        # Update the photo widget
+        self.photo_label.configure(image=tk_image)  # show the new image on the label
+        self.photo_label.image = tk_image  # keep a reference so the image is not garbage-collected
+
+        # Schedule the next frame update
+        self.window.after(10, self.update_frame)  # call update_frame again after 10 ms to refresh continuously
+
+    def take_photo(self):
+        # Take a photo
+        _, frame = self.cap.read()  # read one frame from the camera
+
+        if self.image_flipped:
+            frame = cv2.flip(frame, 1)  # mirror the photo if requested
+        self.last_photo = frame
+        # Get a 13-digit (millisecond) timestamp
+        now_time = Utils().getCurrentDateLong()  # current time as a 13-digit timestamp from the Utils helper
+        self.last_photo_name = f"./robot_data/input/{now_time}.jpg"
+        # Save the photo, named by its timestamp
+        cv2.imwrite(self.last_photo_name, frame)  # write the image to the target path, using the timestamp as the file name
+
+        # Show the photo in the widget
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # convert the frame from BGR to RGB
+        pil_image = Image.fromarray(frame)  # convert the camera frame to a PIL image
+        tk_image = ImageTk.PhotoImage(image=pil_image)  # convert the PIL image to a format Tkinter can display
+        self.photo_label.configure(image=tk_image)  # show the new image on the label
+        self.photo_label.image = tk_image  # keep a reference so the image is not garbage-collected
+
+        # Close the window and the camera
+        self.cap.release()  # release the camera
+        self.window.quit()  # quit the main loop
+        self.window.destroy()  # destroy the window
+
+class Utils():
+    # Return a 13-digit timestamp
+    def getCurrentDateLong(self):
+        current_timestamp = int(round(time.time() * 1000))  # current time in milliseconds
+        return current_timestamp
+
+
+class SocketUtils:
+    def __init__(self):
+        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
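+        # The exchange below, as written in this file: connect to the drawing host,
+        # send a 4-byte big-endian length prefix (struct '!I'), send the raw PNG
+        # bytes, then read a short acknowledgement before closing.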
+        self.client.connect(('192.168.1.70', 8080))
+        imageFile = open('1_real_fake.png', 'rb')
+        imageBytes = imageFile.read()
+        imageFile.close()
+        imageSize = len(imageBytes)
+        self.client.sendall(struct.pack('!I', imageSize))
+        self.client.sendall(imageBytes)
+        response = self.client.recv(1024)
+        self.client.close()
+
+
+# Main entry point
+if __name__ == "__main__":
+    # python test_seq_style3.py --input 1_real_fake.png\n
+    app = CameraApp()  # create the CameraApp object and start the program
+    
\ No newline at end of file
diff --git a/robot_painting/qmupd_vs/captured_image.jpg b/robot_painting/qmupd_vs/captured_image.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5c83661e9a2c93a4aacdf4a68b1fa871f61d7af7
Binary files /dev/null and b/robot_painting/qmupd_vs/captured_image.jpg differ
diff --git a/robot_painting/qmupd_vs/data/__init__.py b/robot_painting/qmupd_vs/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cb618618fc301ce5440720ba62c899f6d4e7321
--- /dev/null
+++ b/robot_painting/qmupd_vs/data/__init__.py
@@ -0,0 +1,93 @@
+"""This package includes all the modules related to data loading and preprocessing
+
+ To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
+ You need to implement four functions:
+    -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
+    -- <__len__>: return the size of dataset.
+    -- <__getitem__>: get a data point from data loader.
+    -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
+
+Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
+See our template dataset class 'template_dataset.py' for more details.
+"""
+import importlib
+import torch.utils.data
+from data.base_dataset import BaseDataset
+
+
+def find_dataset_using_name(dataset_name):
+    """Import the module "data/[dataset_name]_dataset.py".
+
+    In the file, the class called DatasetNameDataset() will
+    be instantiated. It has to be a subclass of BaseDataset,
+    and it is case-insensitive.
+    """
+    dataset_filename = "data." + dataset_name + "_dataset"
+    datasetlib = importlib.import_module(dataset_filename)
+
+    dataset = None
+    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
+    for name, cls in datasetlib.__dict__.items():
+        if name.lower() == target_dataset_name.lower() \
+           and issubclass(cls, BaseDataset):
+            dataset = cls
+
+    if dataset is None:
+        raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
+
+    return dataset
+
+
+def get_option_setter(dataset_name):
+    """Return the static method <modify_commandline_options> of the dataset class."""
+    dataset_class = find_dataset_using_name(dataset_name)
+    return dataset_class.modify_commandline_options
+
+
+def create_dataset(opt):
+    """Create a dataset given the option.
+
+    This function wraps the class CustomDatasetDataLoader.
+    This is the main interface between this package and 'train.py'/'test.py'
+
+    Example:
+        >>> from data import create_dataset
+        >>> dataset = create_dataset(opt)
+    """
+    data_loader = CustomDatasetDataLoader(opt)
+    dataset = data_loader.load_data()
+    return dataset
+
+
+class CustomDatasetDataLoader():
+    """Wrapper class of Dataset class that performs multi-threaded data loading"""
+
+    def __init__(self, opt):
+        """Initialize this class
+
+        Step 1: create a dataset instance given the name [dataset_mode]
+        Step 2: create a multi-threaded data loader.
+
+
+class CustomDatasetDataLoader():
+    """Wrapper class of Dataset class that performs multi-threaded data loading"""
+
+    def __init__(self, opt):
+        """Initialize this class
+
+        Step 1: create a dataset instance given the name [dataset_mode]
+        Step 2: create a multi-threaded data loader.
+        """
+        self.opt = opt
+        dataset_class = find_dataset_using_name(opt.dataset_mode)
+        self.dataset = dataset_class(opt)
+        print("dataset [%s] was created" % type(self.dataset).__name__)
+        self.dataloader = torch.utils.data.DataLoader(
+            self.dataset,
+            batch_size=opt.batch_size,
+            shuffle=not opt.serial_batches,
+            num_workers=int(opt.num_threads))
+
+    def load_data(self):
+        return self
+
+    def __len__(self):
+        """Return the number of data in the dataset"""
+        return min(len(self.dataset), self.opt.max_dataset_size)
+
+    def __iter__(self):
+        """Return a batch of data"""
+        for i, data in enumerate(self.dataloader):
+            if i * self.opt.batch_size >= self.opt.max_dataset_size:
+                break
+            yield data
diff --git a/robot_painting/qmupd_vs/data/base_dataset.py b/robot_painting/qmupd_vs/data/base_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b89ef0a4d00f3e3e43c3f07b95d318a72a1abc4
--- /dev/null
+++ b/robot_painting/qmupd_vs/data/base_dataset.py
@@ -0,0 +1,186 @@
+"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
+
+It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
+"""
+import random
+import numpy as np
+import torch.utils.data as data
+from PIL import Image
+import torchvision.transforms as transforms
+from abc import ABCMeta, abstractmethod
+
+
+class BaseDataset(data.Dataset):
+    __metaclass__ = ABCMeta
+    """This class is an abstract base class (ABC) for datasets.
+
+    To create a subclass, you need to implement the following four functions:
+    -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
+    -- <__len__>: return the size of dataset.
+    -- <__getitem__>: get a data point.
+    -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
+    """
+
+    def __init__(self, opt):
+        """Initialize the class; save the options in the class
+
+        Parameters:
+            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        self.opt = opt
+        self.root = opt.dataroot
+
+    @staticmethod
+    def modify_commandline_options(parser, is_train):
+        """Add new dataset-specific options, and rewrite default values for existing options.
+
+        Parameters:
+            parser          -- original option parser
+            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+        """
+        return parser
+
+    @abstractmethod
+    def __len__(self):
+        """Return the total number of images in the dataset."""
+        return 0
+
+    @abstractmethod
+    def __getitem__(self, index):
+        """Return a data point and its metadata information.
+
+        Parameters:
+            index - - a random integer for data indexing
+
+        Returns:
+            a dictionary of data with their names. It usually contains the data itself and its metadata information.
+ """ + pass + + +def get_params(opt, size): + w, h = size + new_h = h + new_w = w + if opt.preprocess == 'resize_and_crop': + new_h = new_w = opt.load_size + elif opt.preprocess == 'scale_width_and_crop': + new_w = opt.load_size + new_h = opt.load_size * h // w + + x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) + y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) + + flip = random.random() > 0.5 + + return {'crop_pos': (x, y), 'flip': flip} + + +def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True): + transform_list = [] + if grayscale: + transform_list.append(transforms.Grayscale(1)) + if 'resize' in opt.preprocess: + osize = [opt.load_size, opt.load_size] + transform_list.append(transforms.Resize(osize, method)) + elif 'scale_width' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method))) + + if 'crop' in opt.preprocess: + if params is None: + transform_list.append(transforms.RandomCrop(opt.crop_size)) + else: + transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) + + if opt.preprocess == 'none': + transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) + + if not opt.no_flip: + if params is None: + transform_list.append(transforms.RandomHorizontalFlip()) + elif params['flip']: + transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) + + if convert: + transform_list += [transforms.ToTensor()] + if grayscale: + transform_list += [transforms.Normalize((0.5,), (0.5,))] + else: + transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + return transforms.Compose(transform_list) + +def get_transform_mask(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True): + transform_list = [] + if grayscale: + transform_list.append(transforms.Grayscale(1)) + if 'resize' in opt.preprocess: + osize = [opt.load_size, opt.load_size] + transform_list.append(transforms.Resize(osize, method)) + elif 'scale_width' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method))) + + if 'crop' in opt.preprocess: + if params is None: + transform_list.append(transforms.RandomCrop(opt.crop_size)) + else: + transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) + + if opt.preprocess == 'none': + transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) + + if not opt.no_flip: + if params is None: + transform_list.append(transforms.RandomHorizontalFlip()) + elif params['flip']: + transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) + + if convert: + transform_list += [transforms.ToTensor()] + return transforms.Compose(transform_list) + +def __make_power_2(img, base, method=Image.BICUBIC): + ow, oh = img.size + h = int(round(oh / base) * base) + w = int(round(ow / base) * base) + if (h == oh) and (w == ow): + return img + + __print_size_warning(ow, oh, w, h) + return img.resize((w, h), method) + + +def __scale_width(img, target_width, method=Image.BICUBIC): + ow, oh = img.size + if (ow == target_width): + return img + w = target_width + h = int(target_width * oh / ow) + return img.resize((w, h), method) + + +def __crop(img, pos, size): + ow, oh = img.size + x1, y1 = pos + tw = th = size + if (ow > tw or oh > th): + return img.crop((x1, y1, x1 + tw, y1 + th)) + return img + + +def __flip(img, 
flip):
+    if flip:
+        return img.transpose(Image.FLIP_LEFT_RIGHT)
+    return img
+
+
+def __print_size_warning(ow, oh, w, h):
+    """Print warning information about image size (only print once)"""
+    if not hasattr(__print_size_warning, 'has_printed'):
+        print("The image size needs to be a multiple of 4. "
+              "The loaded image size was (%d, %d), so it was adjusted to "
+              "(%d, %d). This adjustment will be done to all images "
+              "whose sizes are not multiples of 4" % (ow, oh, w, h))
+        __print_size_warning.has_printed = True
diff --git a/robot_painting/qmupd_vs/data/image_folder.py b/robot_painting/qmupd_vs/data/image_folder.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9cea74d7e78cd05c728e5d6db4f858a19637883
--- /dev/null
+++ b/robot_painting/qmupd_vs/data/image_folder.py
@@ -0,0 +1,66 @@
+"""A modified image folder class
+
+We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
+so that this class can load images from both the current directory and its subdirectories.
+"""
+
+import torch.utils.data as data
+
+from PIL import Image
+import os
+import os.path
+
+IMG_EXTENSIONS = [
+    '.jpg', '.JPG', '.jpeg', '.JPEG',
+    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
+]
+
+
+def is_image_file(filename):
+    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
+
+
+def make_dataset(dir, max_dataset_size=float("inf")):
+    images = []
+    assert os.path.isdir(dir), '%s is not a valid directory' % dir
+
+    for root, _, fnames in sorted(os.walk(dir)):
+        for fname in fnames:
+            if is_image_file(fname):
+                path = os.path.join(root, fname)
+                images.append(path)
+    return images[:min(max_dataset_size, len(images))]
+
+
+def default_loader(path):
+    return Image.open(path).convert('RGB')
+
+
+class ImageFolder(data.Dataset):
+
+    def __init__(self, root, transform=None, return_paths=False,
+                 loader=default_loader):
+        imgs = make_dataset(root)
+        if len(imgs) == 0:
+            raise(RuntimeError("Found 0 images in: " + root + "\n"
+                               "Supported image extensions are: " +
+                               ",".join(IMG_EXTENSIONS)))
+
+        self.root = root
+        self.imgs = imgs
+        self.transform = transform
+        self.return_paths = return_paths
+        self.loader = loader
+
+    def __getitem__(self, index):
+        path = self.imgs[index]
+        img = self.loader(path)
+        if self.transform is not None:
+            img = self.transform(img)
+        if self.return_paths:
+            return img, path
+        else:
+            return img
+
+    def __len__(self):
+        return len(self.imgs)
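get_params and get_transform in base_dataset.py above deliberately separate the random decisions (crop position, flip) from the transform pipeline itself, so two related images can be transformed consistently. A small usage sketch follows; the option object and its values are illustrative stand-ins for what the real BaseOptions would provide:

```
# Illustrative only: a minimal stand-in for the options object.
from types import SimpleNamespace
from PIL import Image
from data.base_dataset import get_params, get_transform

opt = SimpleNamespace(preprocess='resize_and_crop', load_size=286,
                      crop_size=256, no_flip=False)

img = Image.open('example.jpg').convert('RGB')   # hypothetical input image
params = get_params(opt, img.size)               # one random crop position + flip decision
tensor_rgb = get_transform(opt, params)(img)     # 3-channel tensor, normalized to [-1, 1]
tensor_gray = get_transform(opt, params, grayscale=True)(img)  # same crop/flip, 1 channel
```

Because both calls reuse the same `params`, the two outputs are cropped and flipped identically, which is how the paired datasets below keep images and masks aligned.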
diff --git a/robot_painting/qmupd_vs/data/single_dataset.py b/robot_painting/qmupd_vs/data/single_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..052d10926048de1b679c65c04c1b4e44e7f8ab1a
--- /dev/null
+++ b/robot_painting/qmupd_vs/data/single_dataset.py
@@ -0,0 +1,74 @@
+from data.base_dataset import BaseDataset, get_transform, get_params, get_transform_mask
+from data.image_folder import make_dataset
+from PIL import Image
+import numpy as np
+import torch
+import os, glob
+
+
+class SingleDataset(BaseDataset):
+    """This dataset class can load a set of images specified by the path --dataroot /path/to/data.
+
+    It can be used for generating CycleGAN results only for one side with the model option '--model test'.
+    """
+
+    def __init__(self, opt):
+        """Initialize this dataset class.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        BaseDataset.__init__(self, opt)
+        #self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
+        imglistA = './datasets/list/%s/%s.txt' % (opt.phase+'A', opt.dataroot)
+        if os.path.exists(imglistA):
+            self.A_paths = sorted(open(imglistA, 'r').read().splitlines())
+        else:
+            self.A_paths = sorted(glob.glob(opt.dataroot + '/*.*'))
+        self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
+        #self.transform = get_transform(opt, grayscale=(input_nc == 1))
+
+    def __getitem__(self, index):
+        """Return a data point and its metadata information.
+
+        Parameters:
+            index - - a random integer for data indexing
+
+        Returns a dictionary that contains A and A_paths
+            A(tensor) - - an image in one domain
+            A_paths(str) - - the path of the image
+        """
+        A_path = self.A_paths[index]
+        A_img = Image.open(A_path).convert('RGB')
+        transform_params_A = get_params(self.opt, A_img.size)
+        A = get_transform(self.opt, transform_params_A, grayscale=(self.input_nc == 1))(A_img)
+        item = {'A': A, 'A_paths': A_path}
+
+        if self.opt.model == 'test_r1':
+            basenA = os.path.basename(A_path)
+            A_addchan_img = Image.open(os.path.join('./datasets/list/mask/A_all',basenA))
+            A_addchan = get_transform_mask(self.opt, transform_params_A, grayscale=1)(A_addchan_img)
+            item['A_addchan'] = A_addchan
+
+        if self.opt.style_control:
+            if self.opt.sinput == 'sind':
+                B_style = torch.Tensor([0.,0.,0.])
+                B_style[self.opt.sind] = 1.
+            elif self.opt.sinput == 'svec':
+                if self.opt.svec[0] == '~':
+                    self.opt.svec = '-'+self.opt.svec[1:]
+                ss = self.opt.svec.split(',')
+                B_style = torch.Tensor([float(ss[0]),float(ss[1]),float(ss[2])])
+            elif self.opt.sinput == 'simg':
+                self.featureloc = os.path.join('style_features/styles2/', self.opt.sfeature_mode)
+                # np.load expects a single file path; load the saved feature and convert it to a tensor
+                B_style = torch.Tensor(np.load(os.path.join(self.featureloc, self.opt.simg[:-4]+'.npy')))
+
+            B_style = B_style.view(3, 1, 1)
+            B_style = B_style.repeat(1, self.opt.crop_size//4, self.opt.crop_size//4)
+            item['B_style'] = B_style
+
+        return item
+
+    def __len__(self):
+        """Return the total number of images in the dataset."""
+        return len(self.A_paths)
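Putting the pieces together, SingleDataset is normally reached through create_dataset from data/__init__.py above. A hedged end-to-end sketch; the option fields and values below are illustrative stand-ins for what the options classes would provide, and './examples' is a hypothetical image directory:

```
# Illustrative: minimal options for loading test images via SingleDataset.
from types import SimpleNamespace
from data import create_dataset

opt = SimpleNamespace(
    dataset_mode='single', dataroot='./examples', phase='test',
    max_dataset_size=float('inf'), batch_size=1, serial_batches=True,
    num_threads=0, preprocess='resize_and_crop', load_size=286, crop_size=256,
    no_flip=True, direction='AtoB', input_nc=3, output_nc=3,
    model='test', style_control=False,
)

dataset = create_dataset(opt)  # resolves 'single' -> SingleDataset
for batch in dataset:          # assumes ./examples contains at least one image
    print(batch['A'].shape, batch['A_paths'])
    break
```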
diff --git a/robot_painting/qmupd_vs/data/unaligned_mask_stylecls_dataset.py b/robot_painting/qmupd_vs/data/unaligned_mask_stylecls_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..53b5c38d322e40e79ffb325d8d5b374946851e13
--- /dev/null
+++ b/robot_painting/qmupd_vs/data/unaligned_mask_stylecls_dataset.py
@@ -0,0 +1,159 @@
+import os.path
+from data.base_dataset import BaseDataset, get_params, get_transform, get_transform_mask
+from data.image_folder import make_dataset
+from PIL import Image
+import random
+import torch
+import torchvision.transforms as transforms
+import numpy as np
+
+
+class UnalignedMaskStyleClsDataset(BaseDataset):
+    """
+    This dataset class can load unaligned/unpaired datasets.
+
+    It requires two directories to host training images from domain A '/path/to/data/trainA'
+    and from domain B '/path/to/data/trainB' respectively.
+    You can train the model with the dataset flag '--dataroot /path/to/data'.
+    Similarly, you need to prepare two directories:
+    '/path/to/data/testA' and '/path/to/data/testB' during test time.
+    """
+
+    def __init__(self, opt):
+        """Initialize this dataset class.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        BaseDataset.__init__(self, opt)
+
+        imglistA = './datasets/list/%s/%s.txt' % (opt.phase+'A', opt.dataroot)
+        imglistB = './datasets/list/%s/%s.txt' % (opt.phase+'B', opt.dataroot)
+
+        if not os.path.exists(imglistA) or not os.path.exists(imglistB):
+            self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')  # create a path '/path/to/data/trainA'
+            self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')  # create a path '/path/to/data/trainB'
+
+            self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))  # load images from '/path/to/data/trainA'
+            self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))  # load images from '/path/to/data/trainB'
+        else:
+            self.A_paths = sorted(open(imglistA, 'r').read().splitlines())
+            self.B_paths = sorted(open(imglistB, 'r').read().splitlines())
+
+        self.A_size = len(self.A_paths)  # get the size of dataset A
+        self.B_size = len(self.B_paths)  # get the size of dataset B
+        print("A size:", self.A_size)
+        print("B size:", self.B_size)
+        btoA = self.opt.direction == 'BtoA'
+        self.input_nc = self.opt.output_nc if btoA else self.opt.input_nc  # get the number of channels of input image
+        self.output_nc = self.opt.input_nc if btoA else self.opt.output_nc  # get the number of channels of output image
+
+        if opt.dataroot == '190613-4s':
+            self.softmaxloc = os.path.join('style_features/styles2/', '1vgg19_softmax')
+        elif opt.dataroot == '190613-4sn5':
+            self.softmaxloc = os.path.join('style_features/styles2_sn_equal/', '1vgg19_softmax')
+        elif '190613-4sn' in self.opt.dataroot:
+            self.softmaxloc = os.path.join('style_features/styles2_sn/', '1vgg19_softmax')
+
+
+    def __getitem__(self, index):
+        """Return a data point and its metadata information.
+
+        Parameters:
+            index (int)      -- a random integer for data indexing
+
+        Returns a dictionary that contains A, B, A_paths and B_paths
+            A (tensor)       -- an image in the input domain
+            B (tensor)       -- its corresponding image in the target domain
+            A_paths (str)    -- image paths
+            B_paths (str)    -- image paths
+        """
+        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
+        if self.opt.serial_batches:   # make sure index is within the range
+            index_B = index % self.B_size
+        else:   # randomize the index for domain B to avoid fixed pairs.
+ index_B = random.randint(0, self.B_size - 1) + B_path = self.B_paths[index_B] + A_img = Image.open(A_path).convert('RGB') + B_img = Image.open(B_path).convert('RGB') + + basenA = os.path.basename(A_path) + A_mask_img = Image.open(os.path.join('./datasets/list/mask/A',basenA)) + basenB = os.path.basename(B_path) + basenB2 = basenB.replace('_fake.png','.png') + # for added synthetic drawing + basenB2 = basenB2.replace('_style1.png','.png') + basenB2 = basenB2.replace('_style2.png','.png') + basenB2 = basenB2.replace('_style1single.png','.png') + basenB2 = basenB2.replace('_style2single.png','.png') + B_mask_img = Image.open(os.path.join('./datasets/list/mask/B',basenB2)) + if self.opt.use_eye_mask: + A_maske_img = Image.open(os.path.join('./datasets/list/mask/A_eyes',basenA)) + B_maske_img = Image.open(os.path.join('./datasets/list/mask/B_eyes',basenB2)) + if self.opt.use_lip_mask: + A_maskl_img = Image.open(os.path.join('./datasets/list/mask/A_lips',basenA)) + B_maskl_img = Image.open(os.path.join('./datasets/list/mask/B_lips',basenB2)) + if self.opt.metric_inmask: + A_maskfg_img = Image.open(os.path.join('./datasets/list/mask/A_fg',basenA)) + + # apply image transformation + transform_params_A = get_params(self.opt, A_img.size) + transform_params_B = get_params(self.opt, B_img.size) + A = get_transform(self.opt, transform_params_A, grayscale=(self.input_nc == 1))(A_img) + B = get_transform(self.opt, transform_params_B, grayscale=(self.output_nc == 1))(B_img) + A_mask = get_transform_mask(self.opt, transform_params_A, grayscale=1)(A_mask_img) + B_mask = get_transform_mask(self.opt, transform_params_B, grayscale=1)(B_mask_img) + if self.opt.use_eye_mask: + A_maske = get_transform_mask(self.opt, transform_params_A, grayscale=1)(A_maske_img) + B_maske = get_transform_mask(self.opt, transform_params_B, grayscale=1)(B_maske_img) + if self.opt.use_lip_mask: + A_maskl = get_transform_mask(self.opt, transform_params_A, grayscale=1)(A_maskl_img) + B_maskl = get_transform_mask(self.opt, transform_params_B, grayscale=1)(B_maskl_img) + if self.opt.metric_inmask: + A_maskfg = get_transform_mask(self.opt, transform_params_A, grayscale=1)(A_maskfg_img) + + item = {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path, 'A_mask': A_mask, 'B_mask': B_mask} + if self.opt.use_eye_mask: + item['A_maske'] = A_maske + item['B_maske'] = B_maske + if self.opt.use_lip_mask: + item['A_maskl'] = A_maskl + item['B_maskl'] = B_maskl + if self.opt.metric_inmask: + item['A_maskfg'] = A_maskfg + + + softmax = np.load(os.path.join(self.softmaxloc,basenB[:-4]+'.npy')) + softmax = torch.Tensor(softmax) + [maxv,index] = torch.max(softmax,0) + B_label = index + if len(self.opt.sfeature_mode) >= 8 and self.opt.sfeature_mode[-8:] == '_softmax': + if self.opt.one_hot: + B_style = torch.Tensor([0.,0.,0.]) + B_style[index] = 1. + else: + B_style = softmax + B_style = B_style.view(3, 1, 1) + B_style = B_style.repeat(1, 128, 128) + #print(index, index_B, torch.mean(B_style,(1,2))) + elif self.opt.sfeature_mode == 'domain': + B_style = B_label + item['B_style'] = B_style + item['B_label'] = B_label + if self.opt.isTrain and self.opt.style_loss_with_weight: + item['B_style0'] = softmax + if self.opt.isTrain and self.opt.metricvec: + vec = softmax + vec = vec.view(3, 1, 1) + vec = vec.repeat(1, 299, 299) + item['vec'] = vec + + return item + + def __len__(self): + """Return the total number of images in the dataset. 
+
+        As we have two datasets with potentially different number of images,
+        we take the maximum of the two.
+        """
+        return max(self.A_size, self.B_size)
diff --git a/robot_painting/qmupd_vs/dataset_utils.py b/robot_painting/qmupd_vs/dataset_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1969fe808261af564c605053001fee5f7ca563c9
--- /dev/null
+++ b/robot_painting/qmupd_vs/dataset_utils.py
@@ -0,0 +1,1226 @@
+import os
+import math
+import random
+import scipy.io
+import numpy as np
+import tensorflow as tf
+from PIL import Image
+
+from rasterization_utils.RealRenderer import GizehRasterizor as RealRenderer
+
+
+def copy_hparams(hparams):
+    """Return a copy of an HParams instance."""
+    return tf.contrib.training.HParams(**hparams.values())
+
+
+class GeneralRawDataLoader(object):
+    def __init__(self,
+                 image_path,
+                 raster_size,
+                 test_dataset):
+        self.image_path = image_path
+        self.raster_size = raster_size
+        self.test_dataset = test_dataset
+
+    def get_test_image(self, random_cursor=True, init_cursor_on_undrawn_pixel=False, init_cursor_num=1):
+        input_image_data, image_size_test = self.gen_input_images(self.image_path)
+        input_image_data = np.array(input_image_data,
+                                    dtype=np.float32)  # (1, image_size, image_size, (3)), [0.0-strokes, 1.0-BG]
+
+        return input_image_data, \
+            self.gen_init_cursors(input_image_data, random_cursor, init_cursor_on_undrawn_pixel, init_cursor_num), \
+            image_size_test
+
+    def gen_input_images(self, image_path):
+        img = Image.open(image_path).convert('RGB')
+        height, width = img.height, img.width
+        max_dim = max(height, width)
+
+        img = np.array(img, dtype=np.uint8)
+
+        if height != width:
+            # Padding to a square image
+            if self.test_dataset == 'clean_line_drawings':
+                pad_value = [255, 255, 255]
+            elif self.test_dataset == 'faces':
+                pad_value = [0, 0, 0]
+            else:
+                # TODO: find better padding pixel value
+                pad_value = img[height - 10, width - 10]
+
+            img_r, img_g, img_b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
+            pad_width = max_dim - width
+            pad_height = max_dim - height
+
+            pad_img_r = np.pad(img_r, ((0, pad_height), (0, pad_width)), 'constant', constant_values=pad_value[0])
+            pad_img_g = np.pad(img_g, ((0, pad_height), (0, pad_width)), 'constant', constant_values=pad_value[1])
+            pad_img_b = np.pad(img_b, ((0, pad_height), (0, pad_width)), 'constant', constant_values=pad_value[2])
+            image_array = np.stack([pad_img_r, pad_img_g, pad_img_b], axis=-1)
+        else:
+            image_array = img
+
+        if self.test_dataset == 'faces' and max_dim != 256:
+            image_array_resize = Image.fromarray(image_array, 'RGB')
+            image_array_resize = image_array_resize.resize(size=(256, 256), resample=Image.BILINEAR)
+            image_array = np.array(image_array_resize, dtype=np.uint8)
+
+        assert image_array.shape[0] == image_array.shape[1]
+        img_size = image_array.shape[0]
+        image_array = image_array.astype(np.float32)
+        if self.test_dataset == 'clean_line_drawings':
+            image_array = image_array[:, :, 0] / 255.0  # [0.0-stroke, 1.0-BG]
+        else:
+            image_array = image_array / 255.0  # [0.0-stroke, 1.0-BG]
+        image_array = np.expand_dims(image_array, axis=0)
+        return image_array, img_size
+
+    def crop_patch(self, image, center, image_size, crop_size):
+        x0 = center[0] - crop_size // 2
+        x1 = x0 + crop_size
+        y0 = center[1] - crop_size // 2
+        y1 = y0 + crop_size
+        x0 = max(0, min(x0, image_size))
+        y0 = max(0, min(y0, image_size))
+        x1 = max(0, min(x1, image_size))
+        y1 = max(0, min(y1, image_size))
+        patch = image[y0:y1, x0:x1]
+        return patch
+
+    def gen_init_cursor_single(self, sketch_image,
init_cursor_on_undrawn_pixel, misalign_size=3): + # sketch_image: [0.0-stroke, 1.0-BG] + image_size = sketch_image.shape[0] + if np.sum(1.0 - sketch_image) == 0: + center = np.zeros((2), dtype=np.int32) + return center + else: + while True: + center = np.random.randint(0, image_size, size=(2)) # (2), in large size + patch = 1.0 - self.crop_patch(sketch_image, center, image_size, self.raster_size) + if np.sum(patch) != 0: + if not init_cursor_on_undrawn_pixel: + return center.astype(np.float32) / float(image_size) # (2), in size [0.0, 1.0) + else: + center_patch = 1.0 - self.crop_patch(sketch_image, center, image_size, misalign_size) + if np.sum(center_patch) != 0: + return center.astype(np.float32) / float(image_size) # (2), in size [0.0, 1.0) + + def gen_init_cursors(self, sketch_data, random_pos=True, init_cursor_on_undrawn_pixel=False, init_cursor_num=1): + init_cursor_batch_list = [] + for cursor_i in range(init_cursor_num): + if random_pos: + init_cursor_batch = [] + for i in range(len(sketch_data)): + sketch_image = sketch_data[i].copy().astype(np.float32) # [0.0-stroke, 1.0-BG] + center = self.gen_init_cursor_single(sketch_image, init_cursor_on_undrawn_pixel) + init_cursor_batch.append(center) + + init_cursor_batch = np.stack(init_cursor_batch, axis=0) # (N, 2) + else: + raise Exception('Not finished') + init_cursor_batch_list.append(init_cursor_batch) + + if init_cursor_num == 1: + init_cursor_batch = init_cursor_batch_list[0] + init_cursor_batch = np.expand_dims(init_cursor_batch, axis=1).astype(np.float32) # (N, 1, 2) + else: + init_cursor_batch = np.stack(init_cursor_batch_list, axis=1) # (N, init_cursor_num, 2) + init_cursor_batch = np.expand_dims(init_cursor_batch, axis=2).astype( + np.float32) # (N, init_cursor_num, 1, 2) + + return init_cursor_batch + + +def load_dataset_testing(test_data_base_dir, test_dataset, test_img_name, model_params): + assert test_dataset in ['clean_line_drawings', 'rough_sketches', 'faces'] + # img_path = os.path.join(test_data_base_dir, test_dataset, test_img_name) + # print('Loaded {} from {}'.format(img_path, test_dataset)) + img_path = os.path.join(test_data_base_dir, test_img_name) + # print('Loaded {} from {}'.format(img_path)) + + eval_model_params = copy_hparams(model_params) + eval_model_params.use_input_dropout = 0 + eval_model_params.use_recurrent_dropout = 0 + eval_model_params.use_output_dropout = 0 + eval_model_params.batch_size = 1 + eval_model_params.model_mode = 'sample' + + sample_model_params = copy_hparams(eval_model_params) + sample_model_params.batch_size = 1 # only sample one at a time + sample_model_params.max_seq_len = 1 # sample one point at a time + + test_set = GeneralRawDataLoader(img_path, eval_model_params.raster_size, test_dataset=test_dataset) + + result = [test_set, eval_model_params, sample_model_params] + return result + + +class GeneralMultiObjectDataLoader(object): + def __init__(self, + stroke3_data, + batch_size, + raster_size, + image_size_small, + image_size_large, + is_bin, + is_train): + self.batch_size = batch_size # minibatch size + self.raster_size = raster_size + self.image_size_small = image_size_small + self.image_size_large = image_size_large + self.is_bin = is_bin + self.is_train = is_train + + self.num_batches = len(stroke3_data) // self.batch_size + self.batch_idx = -1 + print('batch_size', batch_size, ', num_batches', self.num_batches) + + self.rasterizor = RealRenderer() + self.memory_sketch_data_batch = [] + + assert type(stroke3_data) is list + self.preprocess_rand_data(stroke3_data) + + def 
preprocess_rand_data(self, stroke3): + if self.is_train: + random.shuffle(stroke3) + self.stroke3_data = stroke3 + + def cal_dist(self, posA, posB): + return np.sqrt(np.sum(np.power(posA - posB, 2))) + + def invalid_position(self, pos, obj_size, pos_list, size_list): + if len(pos_list) == 0: + return False + + pos_a = pos + size_a = obj_size + for i in range(len(pos_list)): + pos_b = pos_list[i] + size_b = size_list[i] + + if self.cal_dist(pos_a, pos_b) < ((size_a + size_b) // 4): + return True + + return False + + def get_object_info(self, image_size, vary_thickness=True, try_total_times=3): + if image_size <= 172: + obj_num = 1 + obj_thickness_list = [3] + elif image_size <= 225: + obj_num = random.randint(1, 2) + obj_thickness_list = np.random.randint(3, 4 + 1, size=(obj_num)) + elif image_size <= 278: + obj_num = 2 + obj_thickness_list = np.random.randint(3, 4 + 1, size=(obj_num)) + elif image_size <= 331: + obj_num = random.randint(2, 3) + while True: + obj_thickness_list = np.random.randint(3, 5 + 1, size=(obj_num)) + if np.sum(obj_thickness_list) / obj_num != 5 and np.sum(obj_thickness_list) < 13: + break + elif image_size <= 384: + obj_num = 3 + while True: + obj_thickness_list = np.random.randint(3, 5 + 1, size=(obj_num)) + if np.sum(obj_thickness_list) / obj_num != 5 and np.sum(obj_thickness_list) < 13: + break + else: + raise Exception('Invalid image_size', image_size) + + if not vary_thickness: + num_item = len(obj_thickness_list) + obj_thickness_list = [3 for _ in range(num_item)] + + obj_pos_list = [] + obj_size_list = [] + if obj_num == 1: + obj_size_list.append(image_size) + center = (image_size // 2, image_size // 2) + obj_pos_list.append(center) + else: + for obj_i in range(obj_num): + for try_i in range(try_total_times): + obj_size = random.randint(128, image_size * 3 // 4) + obj_center = np.random.randint(obj_size // 3, image_size - (obj_size // 3) + 1, size=(2)) + + if not self.invalid_position(obj_center, obj_size, obj_pos_list, + obj_size_list) or try_i == try_total_times - 1: + obj_pos_list.append(obj_center) + obj_size_list.append(obj_size) + break + + assert len(obj_size_list) == len(obj_pos_list) == len(obj_thickness_list) == obj_num + return obj_num, obj_size_list, obj_pos_list, obj_thickness_list + + def object_pasting(self, obj_img, canvas_img, center): + c_y, c_x = center[0], center[1] + obj_size = obj_img.shape[0] + canvas_size = canvas_img.shape[0] + box_left = max(0, c_x - obj_size // 2) + box_right = min(canvas_size, c_x + obj_size // 2) + box_up = max(0, c_y - obj_size // 2) + box_bottom = min(canvas_size, c_y + obj_size // 2) + + box_canvas = canvas_img[box_up: box_bottom, box_left: box_right] + + obj_box_up = box_up - (c_y - obj_size // 2) + obj_box_left = box_left - (c_x - obj_size // 2) + box_obj = obj_img[obj_box_up: obj_box_up + (box_bottom - box_up), + obj_box_left: obj_box_left + (box_right - box_left)] + + box_canvas += box_obj + + rst_canvas = np.copy(canvas_img) + rst_canvas[box_up: box_bottom, box_left: box_right] = box_canvas + rst_canvas = np.clip(rst_canvas, 0.0, 1.0) + + return rst_canvas + + def get_multi_object_image(self, img_size, vary_thickness): + object_num, object_size_list, object_pos_list, object_thickness_list = self.get_object_info( + img_size, vary_thickness=vary_thickness) + + canvas = np.zeros(shape=(img_size, img_size), dtype=np.float32) + + for obj_i in range(object_num): + rand_idx = np.random.randint(0, len(self.stroke3_data)) + rand_stroke3 = self.stroke3_data[rand_idx] # (N_points, 3) + + object_size = 
object_size_list[obj_i]
+            object_center = object_pos_list[obj_i]
+            object_thickness = object_thickness_list[obj_i]
+
+            stroke_image = self.gen_stroke_images([rand_stroke3], object_size, object_thickness)
+            stroke_image = 1.0 - stroke_image[0]  # (image_size, image_size), [0.0-BG, 1.0-strokes]
+
+            canvas = self.object_pasting(stroke_image, canvas, object_center)  # [0.0-BG, 1.0-strokes]
+
+        canvas = 1.0 - canvas  # [0.0-strokes, 1.0-BG]
+        return canvas
+
+    def get_batch_from_memory(self, memory_idx, vary_thickness, fixed_image_size=-1, random_cursor=True,
+                              init_cursor_on_undrawn_pixel=False, init_cursor_num=1):
+        if len(self.memory_sketch_data_batch) >= memory_idx + 1:
+            sketch_data_batch = self.memory_sketch_data_batch[memory_idx]
+            sketch_data_batch = np.expand_dims(sketch_data_batch,
+                                               axis=0)  # (1, image_size, image_size), [0.0-strokes, 1.0-BG]
+            image_size_rand = sketch_data_batch.shape[1]
+        else:
+            if fixed_image_size == -1:
+                image_size_rand = random.randint(self.image_size_small, self.image_size_large)
+            else:
+                image_size_rand = fixed_image_size
+
+            multi_obj_image = self.get_multi_object_image(image_size_rand, vary_thickness)  # [0.0-strokes, 1.0-BG]
+            self.memory_sketch_data_batch.append(multi_obj_image)
+            sketch_data_batch = np.expand_dims(multi_obj_image,
+                                               axis=0)  # (1, image_size, image_size), [0.0-strokes, 1.0-BG]
+
+        return None, sketch_data_batch, \
+            self.gen_init_cursors(sketch_data_batch, random_cursor, init_cursor_on_undrawn_pixel, init_cursor_num), \
+            image_size_rand
+
+    def get_batch_multi_res(self, loop_num, vary_thickness, random_cursor=True,
+                            init_cursor_on_undrawn_pixel=False, init_cursor_num=1):
+        sketch_data_batch = []
+        init_cursors_batch = []
+        image_size_batch = []
+        batch_size_per_loop = self.batch_size // loop_num
+        for loop_i in range(loop_num):
+            image_size_rand = random.randint(self.image_size_small, self.image_size_large)
+            sketch_data_sub_batch = []
+            for batch_i in range(batch_size_per_loop):
+                multi_obj_image = self.get_multi_object_image(image_size_rand, vary_thickness)  # [0.0-strokes, 1.0-BG]
+                sketch_data_sub_batch.append(multi_obj_image)
+            sketch_data_sub_batch = np.stack(sketch_data_sub_batch,
+                                             axis=0)  # (N, image_size, image_size), [0.0-strokes, 1.0-BG]
+
+            init_cursors_sub_batch = self.gen_init_cursors(sketch_data_sub_batch, random_cursor,
+                                                           init_cursor_on_undrawn_pixel, init_cursor_num)
+            sketch_data_batch.append(sketch_data_sub_batch)
+            init_cursors_batch.append(init_cursors_sub_batch)
+            image_size_batch.append(image_size_rand)
+
+        return None, \
+            sketch_data_batch, \
+            init_cursors_batch, \
+            image_size_batch
+
+    def gen_stroke_images(self, stroke3_list, image_size, stroke_width):
+        """
+        :param stroke3_list: list of (batch_size,), each with (N_points, 3)
+        :param image_size:
+        :return:
+        """
+        gt_image_array = self.rasterizor.raster_func(stroke3_list, image_size, stroke_width=stroke_width,
+                                                     is_bin=self.is_bin, version='v2')
+        gt_image_array = np.stack(gt_image_array, axis=0)
+        gt_image_array = 1.0 - gt_image_array  # (batch_size, image_size, image_size), [0.0-strokes, 1.0-BG]
+        return gt_image_array
+
+    def crop_patch(self, image, center, image_size, crop_size):
+        x0 = center[0] - crop_size // 2
+        x1 = x0 + crop_size
+        y0 = center[1] - crop_size // 2
+        y1 = y0 + crop_size
+        x0 = max(0, min(x0, image_size))
+        y0 = max(0, min(y0, image_size))
+        x1 = max(0, min(x1, image_size))
+        y1 = max(0, min(y1, image_size))
+        patch = image[y0:y1, x0:x1]
+        return patch
+
+    def gen_init_cursor_single(self, sketch_image, init_cursor_on_undrawn_pixel,
misalign_size=3): + # sketch_image: [0.0-stroke, 1.0-BG] + image_size = sketch_image.shape[0] + if np.sum(1.0 - sketch_image) == 0: + center = np.zeros((2), dtype=np.int32) + return center + else: + while True: + center = np.random.randint(0, image_size, size=(2)) # (2), in large size + patch = 1.0 - self.crop_patch(sketch_image, center, image_size, self.raster_size) + if np.sum(patch) != 0: + if not init_cursor_on_undrawn_pixel: + return center.astype(np.float32) / float(image_size) # (2), in size [0.0, 1.0) + else: + center_patch = 1.0 - self.crop_patch(sketch_image, center, image_size, misalign_size) + if np.sum(center_patch) != 0: + return center.astype(np.float32) / float(image_size) # (2), in size [0.0, 1.0) + + def gen_init_cursors(self, sketch_data, random_pos=True, init_cursor_on_undrawn_pixel=False, init_cursor_num=1): + init_cursor_batch_list = [] + for cursor_i in range(init_cursor_num): + if random_pos: + init_cursor_batch = [] + for i in range(len(sketch_data)): + sketch_image = sketch_data[i].copy().astype(np.float32) # [0.0-stroke, 1.0-BG] + center = self.gen_init_cursor_single(sketch_image, init_cursor_on_undrawn_pixel) + init_cursor_batch.append(center) + + init_cursor_batch = np.stack(init_cursor_batch, axis=0) # (N, 2) + else: + raise Exception('Not finished') + init_cursor_batch_list.append(init_cursor_batch) + + if init_cursor_num == 1: + init_cursor_batch = init_cursor_batch_list[0] + init_cursor_batch = np.expand_dims(init_cursor_batch, axis=1).astype(np.float32) # (N, 1, 2) + else: + init_cursor_batch = np.stack(init_cursor_batch_list, axis=1) # (N, init_cursor_num, 2) + init_cursor_batch = np.expand_dims(init_cursor_batch, axis=2).astype( + np.float32) # (N, init_cursor_num, 1, 2) + + return init_cursor_batch + + +def load_dataset_multi_object(dataset_base_dir, model_params): + train_stroke3_data = [] + val_stroke3_data = [] + + if model_params.data_set == 'clean_line_drawings': + def load_qd_npz_data(npz_path): + data = np.load(npz_path, encoding='latin1', allow_pickle=True) + selected_strokes3 = data['stroke3'] # (N_sketches,), each with (N_points, 3) + selected_strokes3 = selected_strokes3.tolist() + return selected_strokes3 + + base_dir_clean = 'QuickDraw-clean' + cates = ['airplane', 'bus', 'car', 'sailboat', 'bird', 'cat', 'dog', + # 'rabbit', + 'tree', 'flower', + # 'circle', 'line', + 'zigzag' + ] + + for cate in cates: + train_cate_sketch_data_npz_path = os.path.join(dataset_base_dir, base_dir_clean, 'train', cate + '.npz') + val_cate_sketch_data_npz_path = os.path.join(dataset_base_dir, base_dir_clean, 'test', cate + '.npz') + print(train_cate_sketch_data_npz_path) + + train_cate_stroke3_data = load_qd_npz_data( + train_cate_sketch_data_npz_path) # list of (N_sketches,), each with (N_points, 3) + val_cate_stroke3_data = load_qd_npz_data(val_cate_sketch_data_npz_path) + train_stroke3_data += train_cate_stroke3_data + val_stroke3_data += val_cate_stroke3_data + else: + raise Exception('Unknown data type:', model_params.data_set) + + print('Loaded {}/{} from {}'.format(len(train_stroke3_data), len(val_stroke3_data), model_params.data_set)) + print('model_params.max_seq_len %i.' 
% model_params.max_seq_len) + + eval_sample_model_params = copy_hparams(model_params) + eval_sample_model_params.use_input_dropout = 0 + eval_sample_model_params.use_recurrent_dropout = 0 + eval_sample_model_params.use_output_dropout = 0 + eval_sample_model_params.batch_size = 1 # only sample one at a time + eval_sample_model_params.model_mode = 'eval_sample' + + train_set = GeneralMultiObjectDataLoader(train_stroke3_data, + model_params.batch_size, model_params.raster_size, + model_params.image_size_small, model_params.image_size_large, + model_params.bin_gt, is_train=True) + val_set = GeneralMultiObjectDataLoader(val_stroke3_data, + eval_sample_model_params.batch_size, eval_sample_model_params.raster_size, + eval_sample_model_params.image_size_small, + eval_sample_model_params.image_size_large, + eval_sample_model_params.bin_gt, is_train=False) + + result = [train_set, val_set, model_params, eval_sample_model_params] + return result + + +class GeneralDataLoaderMultiObjectRough(object): + def __init__(self, + photo_data, + sketch_data, + texture_data, + shadow_data, + batch_size, + raster_size, + image_size_small, + image_size_large, + is_train): + self.batch_size = batch_size # minibatch size + self.raster_size = raster_size + self.image_size_small = image_size_small + self.image_size_large = image_size_large + self.is_train = is_train + + assert photo_data is not None + assert len(photo_data) == len(sketch_data) + # self.num_batches = len(sketch_data) // self.batch_size + self.batch_idx = -1 + print('batch_size', batch_size) + + assert type(photo_data) is list + assert type(sketch_data) is list + assert type(texture_data) is list and len(texture_data) > 0 + assert type(shadow_data) is list and len(shadow_data) > 0 + self.photo_data = photo_data + self.sketch_data = sketch_data + self.texture_data = texture_data # list of (H, W, 3), [0, 255], uint8 + self.shadow_data = shadow_data # list of (H, W), [0, 255], uint8 + + self.memory_photo_data_batch = [] + self.memory_sketch_data_batch = [] + + def rough_augmentation(self, raw_photo, texture_prob=0.20, noise_prob=0.15, shadow_prob=0.20): + # raw_photo: (H, W), [0.0-stroke, 1.0-BG] + aug_photo_rgb = np.stack([raw_photo for _ in range(3)], axis=-1) + + def texture_generation(texture_list, image_shape): + while True: + random_texture_id = random.randint(0, len(texture_list) - 1) + texture_large = texture_list[random_texture_id] + t_w, t_h = texture_large.shape[1], texture_large.shape[0] + i_w, i_h = image_shape[1], image_shape[0] + + if t_h >= i_h and t_w >= i_w: + texture_large = np.copy(texture_large).astype(np.float32) + crop_y = random.randint(0, t_h - i_h) + crop_x = random.randint(0, t_w - i_w) + crop_texture = texture_large[crop_y: crop_y + i_h, crop_x: crop_x + i_w, :] + return crop_texture + + def texture_change(rough_img_, all_textures): + # rough_img_: (H, W, 3), [0.0-stroke, 1.0-BG] + + texture_image = texture_generation(all_textures, rough_img_.shape) # (h, w, 3) + texture_image /= 255.0 + + rand_b = np.random.uniform(1.0, 2.0, size=rough_img_.shape) + textured_img = rough_img_ * (texture_image / rand_b + (rand_b - 1.0) / rand_b) # [0.0, 1.0] + return textured_img + + def noise_change(rough_img_, noise_scale=25): + # rough_img_: (H, W, 3), [0.0, 1.0] + rough_img_255 = rough_img_ * 255.0 + + rand_noise = np.random.uniform(-1.0, 1.0, size=rough_img_255.shape) * noise_scale + # rand_noise = np.random.normal(size=rough_img.shape) * noise_scale + noise_img = rough_img_255 + rand_noise + noise_img = np.clip(noise_img, 0.0, 255.0) + 
noise_img /= 255.0 + return noise_img + + def shadow_change(rough_img_, all_shadows): + # rough_img_: (H, W, 3), [0.0, 1.0] + rough_img_255 = rough_img_ * 255.0 + + shadow_i = random.randint(0, len(all_shadows) - 1) + shadow_full = all_shadows[shadow_i] # (H, W), [0, 255] + shadow_img_size = shadow_full.shape[0] + + while True: + position = np.random.randint(-shadow_img_size // 2, shadow_img_size // 2, (2)) + if abs(position[0]) > (shadow_img_size // 8) and abs(position[1]) > (shadow_img_size // 8): + break + position += (shadow_img_size // 2) + + crop_up = shadow_img_size - position[0] + crop_left = shadow_img_size - position[1] + + shadow_image_large = shadow_full[crop_up: crop_up + shadow_img_size, crop_left: crop_left + shadow_img_size] + shadow_bg = Image.fromarray(shadow_image_large, 'L') + shadow_bg = shadow_bg.resize(size=(rough_img_255.shape[1], rough_img_255.shape[0]), resample=Image.BILINEAR) + shadow_bg = np.array(shadow_bg, dtype=np.float32) / 255.0 # [0.0-shadow, 1.0-BG] + shadow_bg = np.stack([shadow_bg for _ in range(3)], axis=-1) + + shadow_img = rough_img_255 * shadow_bg + shadow_img /= 255.0 + return shadow_img + + if random.random() <= texture_prob: + aug_photo_rgb = texture_change(aug_photo_rgb, self.texture_data) # (H, W, 3), [0.0, 1.0] + if random.random() <= noise_prob: + aug_photo_rgb = noise_change(aug_photo_rgb) # (H, W, 3), [0.0, 1.0] + if random.random() <= shadow_prob: + aug_photo_rgb = shadow_change(aug_photo_rgb, self.shadow_data) # (H, W, 3), [0.0, 1.0] + + return aug_photo_rgb + + def image_interpolation(self, photo, sketch, photo_prob): + interp_photo = photo * photo_prob + sketch * (1.0 - photo_prob) + interp_photo = np.clip(interp_photo, 0.0, 1.0) + return interp_photo + + def get_batch_from_memory(self, memory_idx, interpolate_type, fixed_image_size=-1, random_cursor=True, + photo_prob=1.0, init_cursor_num=1): + if len(self.memory_sketch_data_batch) >= memory_idx + 1: + photo_data_batch = self.memory_photo_data_batch[memory_idx] + sketch_data_batch = self.memory_sketch_data_batch[memory_idx] + image_size_rand = sketch_data_batch.shape[1] + else: + if fixed_image_size == -1: + image_size_rand = random.randint(self.image_size_small, self.image_size_large) + else: + image_size_rand = fixed_image_size + + # photo_prob = 0.0 if photo_prob_type == 'zero' else 1.0 + photo_data_batch, sketch_data_batch = self.select_sketch( + image_size_rand) # both: (H, W), [0.0-stroke, 1.0-BG] + photo_data_batch = self.rough_augmentation(photo_data_batch) # (H, W, 3), [0.0-stroke, 1.0-BG] + + self.memory_photo_data_batch.append(photo_data_batch) + self.memory_sketch_data_batch.append(sketch_data_batch) + + if interpolate_type == 'prob': + if random.random() >= photo_prob: + photo_data_batch = np.stack([sketch_data_batch for _ in range(3)], + axis=-1) # (H, W, 3), [0.0-stroke, 1.0-BG] + elif interpolate_type == 'image': + photo_data_batch = self.image_interpolation( + photo_data_batch, np.stack([sketch_data_batch for _ in range(3)], axis=-1), photo_prob) + else: + raise Exception('Unknown interpolate_type', interpolate_type) + + photo_data_batch = np.expand_dims(photo_data_batch, axis=0) # (1, image_size, image_size, 3) + sketch_data_batch = np.expand_dims(sketch_data_batch, + axis=0) # (1, image_size, image_size), [0.0-strokes, 1.0-BG] + + return photo_data_batch, sketch_data_batch, \ + self.gen_init_cursors(sketch_data_batch, random_cursor, init_cursor_num), image_size_rand + + def select_sketch(self, image_size_rand): + resolution_idx = image_size_rand - 
self.image_size_small + img_idx = random.randint(0, len(self.sketch_data[resolution_idx]) - 1) + assert img_idx != -1 + + selected_sketch = self.sketch_data[resolution_idx][img_idx] # [0-stroke, 255-BG], uint8 + selected_photo = self.photo_data[resolution_idx][img_idx] # [0-stroke, 255-BG], uint8 + + rst_sketch_image = selected_sketch.astype(np.float32) / 255.0 # [0.0-stroke, 1.0-BG] + rst_photo_image = selected_photo.astype(np.float32) / 255.0 # [0.0-stroke, 1.0-BG] + + return rst_photo_image, rst_sketch_image + + def get_batch_multi_res(self, loop_num, interpolate_type, random_cursor=True, init_cursor_num=1, photo_prob=1.0): + photo_data_batch = [] + sketch_data_batch = [] + init_cursors_batch = [] + image_size_batch = [] + batch_size_per_loop = self.batch_size // loop_num + for loop_i in range(loop_num): + image_size_rand = random.randint(self.image_size_small, self.image_size_large) + + photo_data_sub_batch = [] + sketch_data_sub_batch = [] + for img_i in range(batch_size_per_loop): + photo_patch, sketch_patch = self.select_sketch(image_size_rand) # both: (H, W), [0.0-stroke, 1.0-BG] + photo_patch = self.rough_augmentation(photo_patch) # (H, W, 3), [0.0-stroke, 1.0-BG] + + if interpolate_type == 'prob': + if random.random() >= photo_prob: + photo_patch = np.stack([sketch_patch for _ in range(3)], + axis=-1) # (H, W, 3), [0.0-stroke, 1.0-BG] + elif interpolate_type == 'image': + photo_patch = self.image_interpolation( + photo_patch, np.stack([sketch_patch for _ in range(3)], axis=-1), photo_prob) + else: + raise Exception('Unknown interpolate_type', interpolate_type) + + photo_data_sub_batch.append(photo_patch) + sketch_data_sub_batch.append(sketch_patch) + + photo_data_sub_batch = np.stack(photo_data_sub_batch, + axis=0) # (N, image_size, image_size, 3), [0.0-strokes, 1.0-BG] + sketch_data_sub_batch = np.stack(sketch_data_sub_batch, + axis=0) # (N, image_size, image_size), [0.0-strokes, 1.0-BG] + init_cursors_sub_batch = self.gen_init_cursors(sketch_data_sub_batch, random_cursor, init_cursor_num) + photo_data_batch.append(photo_data_sub_batch) + sketch_data_batch.append(sketch_data_sub_batch) + init_cursors_batch.append(init_cursors_sub_batch) + image_size_batch.append(image_size_rand) + + return photo_data_batch, sketch_data_batch, init_cursors_batch, image_size_batch + + def crop_patch(self, image, center, image_size, crop_size): + x0 = center[0] - crop_size // 2 + x1 = x0 + crop_size + y0 = center[1] - crop_size // 2 + y1 = y0 + crop_size + x0 = max(0, min(x0, image_size)) + y0 = max(0, min(y0, image_size)) + x1 = max(0, min(x1, image_size)) + y1 = max(0, min(y1, image_size)) + patch = image[y0:y1, x0:x1] + return patch + + def gen_init_cursor_single(self, sketch_image): + # sketch_image: [0.0-stroke, 1.0-BG] + image_size = sketch_image.shape[0] + if np.sum(1.0 - sketch_image) == 0: + center = np.zeros((2), dtype=np.int32) + return center + else: + while True: + center = np.random.randint(0, image_size, size=(2)) # (2), in large size + patch = 1.0 - self.crop_patch(sketch_image, center, image_size, self.raster_size) + if np.sum(patch) != 0: + return center.astype(np.float32) / float(image_size) # (2), in size [0.0, 1.0) + + def gen_init_cursors(self, sketch_data, random_pos=True, init_cursor_num=1): + init_cursor_batch_list = [] + for cursor_i in range(init_cursor_num): + if random_pos: + init_cursor_batch = [] + for i in range(len(sketch_data)): + sketch_image = sketch_data[i].copy().astype(np.float32) # [0.0-stroke, 1.0-BG] + center = self.gen_init_cursor_single(sketch_image) + 
init_cursor_batch.append(center)
+
+                init_cursor_batch = np.stack(init_cursor_batch, axis=0)  # (N, 2)
+            else:
+                raise Exception('Not finished')
+            init_cursor_batch_list.append(init_cursor_batch)
+
+        if init_cursor_num == 1:
+            init_cursor_batch = init_cursor_batch_list[0]
+            init_cursor_batch = np.expand_dims(init_cursor_batch, axis=1).astype(np.float32)  # (N, 1, 2)
+        else:
+            init_cursor_batch = np.stack(init_cursor_batch_list, axis=1)  # (N, init_cursor_num, 2)
+            init_cursor_batch = np.expand_dims(init_cursor_batch, axis=2).astype(
+                np.float32)  # (N, init_cursor_num, 1, 2)
+
+        return init_cursor_batch
+
+
+def load_dataset_multi_object_rough(dataset_base_dir, model_params):
+    train_photo_data = []
+    train_sketch_data = []
+    val_photo_data = []
+    val_sketch_data = []
+    texture_data = []
+    shadow_data = []
+
+    if model_params.data_set == 'rough_sketches':
+        base_dir_rough = 'QuickDraw-rough'
+
+        def load_sketch_data(mat_path):
+            sketch_data_mat = scipy.io.loadmat(mat_path)
+            sketch_data = sketch_data_mat['sketch_array']
+            sketch_data = np.array(sketch_data, dtype=np.uint8)  # (N, resolution, resolution), [0-strokes, 255-BG]
+            return sketch_data
+
+        def load_photo_data(mat_path):
+            photo_data_mat = scipy.io.loadmat(mat_path)
+            photo_data = photo_data_mat['image_array']
+            photo_data = np.array(photo_data, dtype=np.uint8)  # (N, resolution, resolution), [0-strokes, 255-BG]
+            return photo_data
+
+        def load_normal_data(img_path):
+            assert '.png' in img_path or '.jpg' in img_path
+            img = Image.open(img_path).convert('RGB')
+            img = np.array(img, dtype=np.uint8)  # (H, W, 3), [0-stroke, 255-BG], uint8
+            return img
+
+        ## Texture
+        texture_base = os.path.join(dataset_base_dir, base_dir_rough, 'texture')
+        all_texture = os.listdir(texture_base)
+        all_texture.sort()
+
+        for file_name in all_texture:
+            texture_path = os.path.join(texture_base, file_name)
+            texture_uint8 = load_normal_data(texture_path)
+            texture_data.append(texture_uint8)
+
+        ## Shadow
+        def process_angle(img, temp_size):
+            padded_img = img.copy()
+            padded_img[0, 0:temp_size] -= 1
+            padded_img[0, -(temp_size + 1):-1] -= 1
+            padded_img[-1, 0:temp_size] -= 1
+            padded_img[-1, -(temp_size + 1):-1] -= 1
+
+            padded_img[0:temp_size, 0] -= 1
+            padded_img[0:temp_size, -1] -= 1
+            padded_img[-(temp_size + 1):-1, 0] -= 1
+            padded_img[-(temp_size + 1):-1, -1] -= 1
+            return padded_img
+
+        def pad_img(ori_img, pad_value):
+            padded_img = np.pad(ori_img, 1, constant_values=pad_value)
+            img_h, img_w = padded_img.shape[0], padded_img.shape[1]
+
+            temp_size = img_h // 3
+            padded_img = process_angle(padded_img, temp_size)
+
+            temp_size = img_h // 9
+            padded_img = process_angle(padded_img, temp_size)
+
+            temp_size = img_h // 15
+            padded_img = process_angle(padded_img, temp_size)
+
+            temp_size = img_h // 21
+            padded_img = process_angle(padded_img, temp_size)
+
+            padded_img = np.clip(padded_img, 0, 255)
+
+            return padded_img
+
+        def shadow_generation(transparency, shadow_img_size=1024):
+            deepest_value = int(255 * transparency)
+
+            center_patch = np.zeros((shadow_img_size // 2, shadow_img_size // 2), dtype=np.uint8)
+            center_patch.fill(255)
+
+            pad_gap = shadow_img_size // 2
+            shadow_patch = center_patch.copy()
+            for i in range(pad_gap):
+                curr_pad_value = 255.0 - float(255.0 - deepest_value) / float(pad_gap) * (i + 1)
+                shadow_patch = pad_img(shadow_patch, pad_value=curr_pad_value)
+
+            for i in range(shadow_img_size // 4):
+                shadow_patch = pad_img(shadow_patch, pad_value=deepest_value)
+
+            assert shadow_patch.shape[0] == shadow_img_size * 2, shadow_patch.shape[0]
+            return
shadow_patch + + for transparency_ in range(90, 95 + 1): + transparency = transparency_ / 100.0 + shadow_full = shadow_generation(transparency) + shadow_data.append(shadow_full) + + splits = ['train', 'test'] + + resolutions = [model_params.image_size_small, model_params.image_size_large] + + for resolution in range(resolutions[0], resolutions[1] + 1): + for split in splits: + sketch_mat1_path = os.path.join(dataset_base_dir, base_dir_rough, 'model_pencil1', + 'sketch', split, 'res_' + str(resolution) + '.mat') + photo_mat1_path = os.path.join(dataset_base_dir, base_dir_rough, 'model_pencil1', + 'photo', split, 'res_' + str(resolution) + '.mat') + sketch_data1_uint8 = load_sketch_data( + sketch_mat1_path) # (N, resolution, resolution), [0-strokes, 255-BG] + photo_data1_uint8 = load_photo_data(photo_mat1_path) # (N, resolution, resolution), [0-strokes, 255-BG] + + sketch_mat2_path = os.path.join(dataset_base_dir, base_dir_rough, 'model_pencil2', + 'sketch', split, 'res_' + str(resolution) + '.mat') + photo_mat2_path = os.path.join(dataset_base_dir, base_dir_rough, 'model_pencil2', + 'photo', split, 'res_' + str(resolution) + '.mat') + sketch_data2_uint8 = load_sketch_data( + sketch_mat2_path) # (N, resolution, resolution), [0-strokes, 255-BG] + photo_data2_uint8 = load_photo_data(photo_mat2_path) # (N, resolution, resolution), [0-strokes, 255-BG] + + sketch_data_uint8 = np.concatenate([sketch_data1_uint8, sketch_data2_uint8], + axis=0) # (N, resolution, resolution), [0-strokes, 255-BG] + photo_data_uint8 = np.concatenate([photo_data1_uint8, photo_data2_uint8], + axis=0) # (N, resolution, resolution), [0-strokes, 255-BG] + + if split == 'train': + train_photo_data.append(photo_data_uint8) + train_sketch_data.append(sketch_data_uint8) + else: + val_photo_data.append(photo_data_uint8) + val_sketch_data.append(sketch_data_uint8) + + assert len(train_sketch_data) == len(train_photo_data) + assert len(val_sketch_data) == len(val_photo_data) + else: + raise Exception('Unknown data type:', model_params.data_set) + + print('Loaded {}/{} from {}'.format(len(train_sketch_data), len(val_sketch_data), model_params.data_set)) + print('model_params.max_seq_len %i.' 
% model_params.max_seq_len) + + eval_sample_model_params = copy_hparams(model_params) + eval_sample_model_params.use_input_dropout = 0 + eval_sample_model_params.use_recurrent_dropout = 0 + eval_sample_model_params.use_output_dropout = 0 + eval_sample_model_params.batch_size = 1 # only sample one at a time + eval_sample_model_params.model_mode = 'eval_sample' + + train_set = GeneralDataLoaderMultiObjectRough(train_photo_data, train_sketch_data, + texture_data, shadow_data, + model_params.batch_size, model_params.raster_size, + model_params.image_size_small, model_params.image_size_large, + is_train=True) + val_set = GeneralDataLoaderMultiObjectRough(val_photo_data, val_sketch_data, + texture_data, shadow_data, + eval_sample_model_params.batch_size, + eval_sample_model_params.raster_size, + eval_sample_model_params.image_size_small, + eval_sample_model_params.image_size_large, + is_train=False) + + result = [ + train_set, val_set, model_params, eval_sample_model_params + ] + return result + + +class GeneralDataLoaderNormalImageLinear(object): + def __init__(self, + photo_data, + sketch_data, + sketch_shape, + batch_size, + raster_size, + image_size_small, + image_size_large, + random_image_size, + flip_prob, + rotate_prob, + is_train): + self.batch_size = batch_size # minibatch size + self.raster_size = raster_size + self.image_size_small = image_size_small + self.image_size_large = image_size_large + self.random_image_size = random_image_size + self.is_train = is_train + + assert photo_data is not None + assert len(photo_data) == len(sketch_data) + self.num_batches = len(sketch_data) // self.batch_size + self.batch_idx = -1 + print('batch_size', batch_size, ', num_batches', self.num_batches) + + self.flip_prob = flip_prob + self.rotate_prob = rotate_prob + + assert type(photo_data) is list + assert type(sketch_data) is list + self.photo_data = photo_data + self.sketch_data = sketch_data + self.sketch_shape = sketch_shape + + def get_batch_from_memory(self, memory_idx, interpolate_type, fixed_image_size=-1, random_cursor=True, + photo_prob=1.0, + init_cursor_num=1): + if self.random_image_size: + image_size_rand = fixed_image_size + else: + image_size_rand = self.image_size_large + + photo_data_batch, sketch_data_batch = self.select_sketch_and_crop( + image_size_rand, data_idx=memory_idx, photo_prob=photo_prob, + interpolate_type=interpolate_type) # sketch_patch: [0.0-stroke, 1.0-BG] + + photo_data_batch = np.expand_dims(photo_data_batch, axis=0) # (1, image_size, image_size, 3) + sketch_data_batch = np.expand_dims(sketch_data_batch, + axis=0) # (1, image_size, image_size), [0.0-strokes, 1.0-BG] + image_size_rand = sketch_data_batch.shape[1] + + return photo_data_batch, sketch_data_batch, \ + self.gen_init_cursors(sketch_data_batch, random_cursor, init_cursor_num), image_size_rand + + def crop_and_augment(self, photo, sketch, shape, crop_size, rotate_angle, stroke_cover=0.01): + # img: [0-stroke, 255-BG], uint8 + + def angle_convert(angle): + return angle / 180.0 * math.pi + + img_h, img_w = shape[0], shape[1] + + if self.is_train: + crop_up = random.randint(0, img_h - crop_size) + crop_left = random.randint(0, img_w - crop_size) + else: + crop_up = (img_h - crop_size) // 2 + crop_left = (img_w - crop_size) // 2 + + assert crop_up >= 0 + assert crop_left >= 0 + + crop_box = (crop_left, crop_up, crop_left + crop_size, crop_up + crop_size) + rst_sketch_image = sketch.crop(crop_box) + rst_photo_image = photo.crop(crop_box) + + if random.random() <= self.flip_prob and self.is_train: + 
rst_sketch_image = rst_sketch_image.transpose(Image.FLIP_LEFT_RIGHT) + rst_photo_image = rst_photo_image.transpose(Image.FLIP_LEFT_RIGHT) + + if rotate_angle != 0 and self.is_train: + rst_sketch_image = rst_sketch_image.rotate(rotate_angle, resample=Image.BILINEAR) + rst_photo_image = rst_photo_image.rotate(rotate_angle, resample=Image.BILINEAR) + rst_sketch_image = np.array(rst_sketch_image, dtype=np.uint8) + rst_photo_image = np.array(rst_photo_image, dtype=np.uint8) + + center = rst_photo_image.shape[0] // 2 + + new_dim = float(crop_size) / ( + math.sin(angle_convert(abs(rotate_angle))) + math.cos(angle_convert(abs(rotate_angle)))) + new_dim = int(round(new_dim)) + + start_pos = center - new_dim // 2 + end_pos = start_pos + new_dim + rst_sketch_image = rst_sketch_image[start_pos:end_pos, start_pos:end_pos, :] + rst_photo_image = rst_photo_image[start_pos:end_pos, start_pos:end_pos, :] + + rst_sketch_image = np.array(rst_sketch_image, dtype=np.float32) / 255.0 # [0.0-stroke, 1.0-BG] + rst_sketch_image = rst_sketch_image[:, :, 0] + rst_photo_image = np.array(rst_photo_image, dtype=np.float32) / 255.0 # [0.0-stroke, 1.0-BG] + + percentage = np.mean(1.0 - rst_sketch_image) + valid = True + if percentage < stroke_cover: + valid = False + + return rst_photo_image, rst_sketch_image, valid + + def image_interpolation(self, photo, sketch, photo_prob): + interp_photo = photo * photo_prob + sketch * (1.0 - photo_prob) + interp_photo = np.clip(interp_photo, 0.0, 1.0) + return interp_photo + + def select_sketch_and_crop(self, image_size_rand, interpolate_type, rotate_angle=0, photo_prob=1.0, + data_idx=-1, trial_times=10): + if self.is_train: + while True: + rand_img_idx = random.randint(0, len(self.sketch_data) - 1) + selected_sketch_shape = self.sketch_shape[rand_img_idx] + if selected_sketch_shape[0] >= image_size_rand and selected_sketch_shape[1] >= image_size_rand: + img_idx = rand_img_idx + break + else: + assert data_idx != -1 + img_idx = data_idx + + assert img_idx != -1 + selected_sketch = self.sketch_data[img_idx] + selected_photo = self.photo_data[img_idx] + selected_shape = self.sketch_shape[img_idx] + + assert interpolate_type in ['prob', 'image'] + + if interpolate_type == 'prob' and random.random() >= photo_prob: + selected_photo = self.sketch_data[img_idx] + + for trial_i in range(trial_times): + cropped_photo, cropped_sketch, valid = \ + self.crop_and_augment(selected_photo, selected_sketch, selected_shape, image_size_rand, rotate_angle) + # cropped_photo, cropped_sketch: [0.0-stroke, 1.0-BG] + + if valid or trial_i == trial_times - 1: + if interpolate_type == 'image': + cropped_photo = self.image_interpolation(cropped_photo, + np.stack([cropped_sketch for _ in range(3)], axis=-1), + photo_prob) + + return cropped_photo, cropped_sketch + + def get_batch_multi_res(self, loop_num, interpolate_type, random_cursor=True, init_cursor_num=1, photo_prob=1.0): + photo_data_batch = [] + sketch_data_batch = [] + init_cursors_batch = [] + image_size_batch = [] + batch_size_per_loop = self.batch_size // loop_num + for loop_i in range(loop_num): + if self.random_image_size: + image_size_rand = random.randint(self.image_size_small, self.image_size_large) + else: + image_size_rand = self.image_size_large + + rotate_angle = 0 + if random.random() <= self.rotate_prob: + rotate_angle = random.randint(-45, 45) + + photo_data_sub_batch = [] + sketch_data_sub_batch = [] + for img_i in range(batch_size_per_loop): + photo_patch, sketch_patch = \ + self.select_sketch_and_crop(image_size_rand, 
+    def get_batch_multi_res(self, loop_num, interpolate_type, random_cursor=True, init_cursor_num=1, photo_prob=1.0):
+        photo_data_batch = []
+        sketch_data_batch = []
+        init_cursors_batch = []
+        image_size_batch = []
+        batch_size_per_loop = self.batch_size // loop_num
+        for loop_i in range(loop_num):
+            if self.random_image_size:
+                image_size_rand = random.randint(self.image_size_small, self.image_size_large)
+            else:
+                image_size_rand = self.image_size_large
+
+            rotate_angle = 0
+            if random.random() <= self.rotate_prob:
+                rotate_angle = random.randint(-45, 45)
+
+            photo_data_sub_batch = []
+            sketch_data_sub_batch = []
+            for img_i in range(batch_size_per_loop):
+                photo_patch, sketch_patch = \
+                    self.select_sketch_and_crop(image_size_rand,
+                                                rotate_angle=rotate_angle, photo_prob=photo_prob,
+                                                interpolate_type=interpolate_type)  # sketch_patch: [0.0-stroke, 1.0-BG]
+                photo_data_sub_batch.append(photo_patch)
+                sketch_data_sub_batch.append(sketch_patch)
+
+            photo_data_sub_batch = np.stack(photo_data_sub_batch,
+                                            axis=0)  # (N, image_size, image_size, 3), [0.0-strokes, 1.0-BG]
+            sketch_data_sub_batch = np.stack(sketch_data_sub_batch,
+                                             axis=0)  # (N, image_size, image_size), [0.0-strokes, 1.0-BG]
+            init_cursors_sub_batch = self.gen_init_cursors(sketch_data_sub_batch, random_cursor, init_cursor_num)
+
+            photo_data_batch.append(photo_data_sub_batch)
+            sketch_data_batch.append(sketch_data_sub_batch)
+            init_cursors_batch.append(init_cursors_sub_batch)
+
+            image_size_rand = photo_data_sub_batch.shape[1]
+            image_size_batch.append(image_size_rand)
+
+        return photo_data_batch, sketch_data_batch, init_cursors_batch, image_size_batch
+
+    def crop_patch(self, image, center, image_size, crop_size):
+        # clamp to the image bounds; patches near the border may be smaller than crop_size
+        x0 = center[0] - crop_size // 2
+        x1 = x0 + crop_size
+        y0 = center[1] - crop_size // 2
+        y1 = y0 + crop_size
+        x0 = max(0, min(x0, image_size))
+        y0 = max(0, min(y0, image_size))
+        x1 = max(0, min(x1, image_size))
+        y1 = max(0, min(y1, image_size))
+        patch = image[y0:y1, x0:x1]
+        return patch
+
+    def gen_init_cursor_single(self, sketch_image):
+        # sketch_image: [0.0-stroke, 1.0-BG]
+        image_size = sketch_image.shape[0]
+        if np.sum(1.0 - sketch_image) == 0:
+            # blank sketch: fall back to the top-left corner
+            center = np.zeros((2), dtype=np.int32)
+            return center
+        else:
+            # resample until the raster window around the cursor contains a stroke
+            while True:
+                center = np.random.randint(0, image_size, size=(2))  # (2), in large size
+                patch = 1.0 - self.crop_patch(sketch_image, center, image_size, self.raster_size)
+                if np.sum(patch) != 0:
+                    return center.astype(np.float32) / float(image_size)  # (2), in size [0.0, 1.0)
+
+    def gen_init_cursors(self, sketch_data, random_pos=True, init_cursor_num=1):
+        init_cursor_batch_list = []
+        for cursor_i in range(init_cursor_num):
+            if random_pos:
+                init_cursor_batch = []
+                for i in range(len(sketch_data)):
+                    sketch_image = sketch_data[i].copy().astype(np.float32)  # [0.0-stroke, 1.0-BG]
+                    center = self.gen_init_cursor_single(sketch_image)
+                    init_cursor_batch.append(center)
+
+                init_cursor_batch = np.stack(init_cursor_batch, axis=0)  # (N, 2)
+            else:
+                raise NotImplementedError('non-random initial cursor placement is not implemented')
+            init_cursor_batch_list.append(init_cursor_batch)
+
+        if init_cursor_num == 1:
+            init_cursor_batch = init_cursor_batch_list[0]
+            init_cursor_batch = np.expand_dims(init_cursor_batch, axis=1).astype(np.float32)  # (N, 1, 2)
+        else:
+            init_cursor_batch = np.stack(init_cursor_batch_list, axis=1)  # (N, init_cursor_num, 2)
+            init_cursor_batch = np.expand_dims(init_cursor_batch, axis=2).astype(
+                np.float32)  # (N, init_cursor_num, 1, 2)
+
+        return init_cursor_batch
+
+
+def load_dataset_normal_images(dataset_base_dir, model_params):
+    train_photo_data = []
+    train_sketch_data = []
+    train_data_shape = []
+    val_photo_data = []
+    val_sketch_data = []
+    val_data_shape = []
+
+    if model_params.data_set == 'faces':
+        random_training_image_size = False
+        # negative probabilities disable flip/rotation augmentation for this
+        # dataset: random.random() in [0.0, 1.0) is never <= -0.1
+        flip_prob = -0.1
+        rotate_prob = -0.1
+
+        splits = ['train', 'val']
+
+        database = os.path.join(dataset_base_dir, 'CelebAMask-faces')
+        photo_base = os.path.join(database, 'CelebA-HQ-img256')
+        edge_base = os.path.join(database, 'CelebAMask-HQ-edge256')
+
+        train_split_txt_save_path = os.path.join(database, 'train.txt')
+        val_split_txt_save_path = os.path.join(database, 'val.txt')
+        celeba_train_txt = np.loadtxt(train_split_txt_save_path, dtype=str)
+        celeba_val_txt = np.loadtxt(val_split_txt_save_path, dtype=str)
+
+        splits_indices_map = {'train': celeba_train_txt, 'val': celeba_val_txt}
+
+        for split in splits:
+            split_indices = splits_indices_map[split]
+
+            for file_idx in split_indices:
+                img_file_path = os.path.join(photo_base, str(file_idx) + '.jpg')
+                edge_img_path = os.path.join(edge_base, str(file_idx) + '.png')
+
+                img_data = Image.open(img_file_path).convert('RGB')
+                edge_data = Image.open(edge_img_path).convert('RGB')
+
+                if split == 'train':
+                    train_photo_data.append(img_data)
+                    train_sketch_data.append(edge_data)
+                    train_data_shape.append((img_data.height, img_data.width))
+                else:  # split == 'val'
+                    val_photo_data.append(img_data)
+                    val_sketch_data.append(edge_data)
+                    val_data_shape.append((img_data.height, img_data.width))
+
+        assert len(train_sketch_data) == len(train_data_shape) == len(train_photo_data)
+        assert len(val_sketch_data) == len(val_data_shape) == len(val_photo_data)
+    else:
+        raise Exception('Unknown data type: {}'.format(model_params.data_set))
+
+    print('Loaded {} train / {} val images from {}'.format(
+        len(train_sketch_data), len(val_sketch_data), model_params.data_set))
+    print('model_params.max_seq_len %i.' % model_params.max_seq_len)
+
+    eval_sample_model_params = copy_hparams(model_params)
+    eval_sample_model_params.use_input_dropout = 0
+    eval_sample_model_params.use_recurrent_dropout = 0
+    eval_sample_model_params.use_output_dropout = 0
+    eval_sample_model_params.batch_size = 1  # only sample one at a time
+    eval_sample_model_params.model_mode = 'eval_sample'
+
+    train_set = GeneralDataLoaderNormalImageLinear(train_photo_data, train_sketch_data, train_data_shape,
+                                                   model_params.batch_size, model_params.raster_size,
+                                                   image_size_small=model_params.image_size_small,
+                                                   image_size_large=model_params.image_size_large,
+                                                   random_image_size=random_training_image_size,
+                                                   flip_prob=flip_prob, rotate_prob=rotate_prob,
+                                                   is_train=True)
+    val_set = GeneralDataLoaderNormalImageLinear(val_photo_data, val_sketch_data, val_data_shape,
+                                                 eval_sample_model_params.batch_size,
+                                                 eval_sample_model_params.raster_size,
+                                                 image_size_small=eval_sample_model_params.image_size_small,
+                                                 image_size_large=eval_sample_model_params.image_size_large,
+                                                 random_image_size=random_training_image_size,
+                                                 flip_prob=flip_prob, rotate_prob=rotate_prob,
+                                                 is_train=False)
+
+    result = [
+        train_set, val_set, model_params, eval_sample_model_params
+    ]
+    return result
+
+
+def load_dataset_training(dataset_base_dir, model_params):
+    if model_params.data_set == 'clean_line_drawings':
+        return load_dataset_multi_object(dataset_base_dir, model_params)
+    elif model_params.data_set == 'rough_sketches':
+        return load_dataset_multi_object_rough(dataset_base_dir, model_params)
+    elif model_params.data_set == 'faces':
+        return load_dataset_normal_images(dataset_base_dir, model_params)
+    else:
+        raise Exception('Unknown data_set: {}'.format(model_params.data_set))
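+
+# Usage sketch (for orientation only; `hparams` and the dataset root below are
+# illustrative assumptions, not names defined in this module):
+#
+#   train_set, val_set, params, eval_params = load_dataset_training('datasets', hparams)
+#   photos, sketches, cursors, sizes = train_set.get_batch_multi_res(
+#       loop_num=1, interpolate_type='image', photo_prob=1.0)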
diff --git a/robot_painting/qmupd_vs/docs/assets/bootstrap.min.css b/robot_painting/qmupd_vs/docs/assets/bootstrap.min.css
new file mode 100644
index 0000000000000000000000000000000000000000..a9f35ceedfac7fc0559b121bed105eaf80f10bf2
--- /dev/null
+++ b/robot_painting/qmupd_vs/docs/assets/bootstrap.min.css
@@ -0,0 +1,5 @@
+/*!
+ * Bootstrap v3.2.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*!
normalize.css v3.0.1 | MIT License | git.io/normalize */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background:0 0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}@media print{*{color:#000!important;text-shadow:none!important;background:transparent!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}select{background:#fff!important}.navbar{display:none}.table td,.table th{background-color:#fff!important}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons 
Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyph
icon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:b
efore{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:before,:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive,.thumbnail>img,.thumbnail a>img,.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;width:100% \9;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;width:100% \9;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small,h1 .small,h2 .small,h3 .small,h4 .small,h5 .small,h6 .small,.h1 .small,.h2 .small,.h3 .small,.h4 .small,.h5 .small,.h6 
.small{font-weight:400;line-height:1;color:#777}h1,.h1,h2,.h2,h3,.h3{margin-top:20px;margin-bottom:10px}h1 small,.h1 small,h2 small,.h2 small,h3 small,.h3 small,h1 .small,.h1 .small,h2 .small,.h2 .small,h3 .small,.h3 .small{font-size:65%}h4,.h4,h5,.h5,h6,.h6{margin-top:10px;margin-bottom:10px}h4 small,.h4 small,h5 small,.h5 small,h6 small,.h6 small,h4 .small,.h4 .small,h5 .small,.h5 .small,h6 .small,.h6 .small{font-size:75%}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}small,.small{font-size:85%}cite{font-style:normal}mark,.mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#428bca}a.text-primary:hover{color:#3071a9}.text-success{color:#3c763d}a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#428bca}a.bg-primary:hover{background-color:#3071a9}.bg-success{background-color:#dff0d8}a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dt,dd{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote p:last-child,blockquote ul:last-child,blockquote ol:last-child{margin-bottom:0}blockquote footer,blockquote small,blockquote .small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote footer:before,blockquote small:before,blockquote .small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse footer:before,blockquote.pull-right footer:before,.blockquote-reverse small:before,blockquote.pull-right small:before,.blockquote-reverse .small:before,blockquote.pull-right .small:before{content:''}.blockquote-reverse footer:after,blockquote.pull-right footer:after,.blockquote-reverse small:after,blockquote.pull-right small:after,.blockquote-reverse .small:after,blockquote.pull-right .small:after{content:'\00A0 
\2014'}blockquote:before,blockquote:after{content:""}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media (min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-xs-1,.col-sm-1,.col-md-1,.col-lg-1,.col-xs-2,.col-sm-2,.col-md-2,.col-lg-2,.col-xs-3,.col-sm-3,.col-md-3,.col-lg-3,.col-xs-4,.col-sm-4,.col-md-4,.col-lg-4,.col-xs-5,.col-sm-5,.col-md-5,.col-lg-5,.col-xs-6,.col-sm-6,.col-md-6,.col-lg-6,.col-xs-7,.col-sm-7,.col-md-7,.col-lg-7,.col-xs-8,.col-sm-8,.col-md-8,.col-lg-8,.col-xs-9,.col-sm-9,.col-md-9,.col-lg-9,.col-xs-10,.col-sm-10,.col-md-10,.col-lg-10,.col-xs-11,.col-sm-11,.col-md-11,.col-lg-11,.col-xs-12,.col-sm-12,.col-md-12,.col-lg-12{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-of
fset-0{margin-left:0}@media (min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media 
(min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media (min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transp
arent}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>thead>tr>th,.table>tbody>tr>th,.table>tfoot>tr>th,.table>thead>tr>td,.table>tbody>tr>td,.table>tfoot>tr>td{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>th,.table>caption+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>td,.table>thead:first-child>tr:first-child>td{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>thead>tr>th,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>tbody>tr>td,.table-condensed>tfoot>tr>td{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>tbody>tr>td,.table-bordered>tfoot>tr>td{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>thead>tr>td{border-bottom-width:2px}.table-striped>tbody>tr:nth-child(odd)>td,.table-striped>tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover>tbody>tr:hover>td,.table-hover>tbody>tr:hover>th{background-color:#f5f5f5}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table th[class*=col-]{position:static;display:table-cell;float:none}.table>thead>tr>td.active,.table>tbody>tr>td.active,.table>tfoot>tr>td.active,.table>thead>tr>th.active,.table>tbody>tr>th.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>tbody>tr.active>td,.table>tfoot>tr.active>td,.table>thead>tr.active>th,.table>tbody>tr.active>th,.table>tfoot>tr.active>th{background-color:#f5f5f5}.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover,.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr.active:hover>th{background-color:#e8e8e8}.table>thead>tr>td.success,.table>tbody>tr>td.success,.table>tfoot>tr>td.success,.table>thead>tr>th.success,.table>tbody>tr>th.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>tbody>tr.success>td,.table>tfoot>tr.success>td,.table>thead>tr.success>th,.table>tbody>tr.success>th,.table>tfoot>tr.success>th{background-color:#dff0d8}.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover,.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr.success:hover>th{background-color:#d0e9c6}.table>thead>tr>td.info,.table>tbody>tr>td.info,.table>tfoot>tr>td.info,.table>thead>tr>th.info,.table>tbody>tr>th.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>tbody>tr.info>td,.table>tfoot>tr.info>td,.table>thead>tr.info>th,.table>tbody>tr.info>th,.table>tfoot>tr.info>th{background-color:#d9edf7}.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover,.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr.info:hover>th{background-color:#c4e3f3}.table>thead>tr>td.warning,.table>tbody>tr>td.warning,.table>tfoot>tr>td.warning,.table>thead>tr>th.warning,.table>tbody>tr>th.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>tbody>tr.warning>td,.table>tfoot>tr.warning>td,.table>thead>tr.warning>th,.table>tbody>tr.warning>th,.table>tfoot>tr.warning>th{background-color:#fcf8e3}.table-hover>tbody>tr>td.warning:hover,.tab
le-hover>tbody>tr>th.warning:hover,.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr.warning:hover>th{background-color:#faf2cc}.table>thead>tr>td.danger,.table>tbody>tr>td.danger,.table>tfoot>tr>td.danger,.table>thead>tr>th.danger,.table>tbody>tr>th.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>tbody>tr.danger>td,.table>tfoot>tr.danger>td,.table>thead>tr.danger>th,.table>tbody>tr.danger>th,.table>tfoot>tr.danger>th{background-color:#f2dede}.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover,.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr.danger:hover>th{background-color:#ebcccc}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-x:auto;overflow-y:hidden;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid #ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>thead>tr>th,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tfoot>tr>td{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>thead>tr>th:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.table-responsive>.table-bordered>thead>tr>th:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>th,.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=radio],input[type=checkbox]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=radio]:focus,input[type=checkbox]:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out 
.15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#777;opacity:1}.form-control:-ms-input-placeholder{color:#777}.form-control::-webkit-input-placeholder{color:#777}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{cursor:not-allowed;background-color:#eee;opacity:1}textarea.form-control{height:auto}input[type=search]{-webkit-appearance:none}input[type=date],input[type=time],input[type=datetime-local],input[type=month]{line-height:34px;line-height:1.42857143 \0}input[type=date].input-sm,input[type=time].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm{line-height:30px}input[type=date].input-lg,input[type=time].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg{line-height:46px}.form-group{margin-bottom:15px}.radio,.checkbox{position:relative;display:block;min-height:20px;margin-top:10px;margin-bottom:10px}.radio label,.checkbox label{padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.radio input[type=radio],.radio-inline input[type=radio],.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox]{position:absolute;margin-top:4px \9;margin-left:-20px}.radio+.radio,.checkbox+.checkbox{margin-top:-5px}.radio-inline,.checkbox-inline{display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.radio-inline+.radio-inline,.checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px}input[type=radio][disabled],input[type=checkbox][disabled],input[type=radio].disabled,input[type=checkbox].disabled,fieldset[disabled] input[type=radio],fieldset[disabled] input[type=checkbox]{cursor:not-allowed}.radio-inline.disabled,.checkbox-inline.disabled,fieldset[disabled] .radio-inline,fieldset[disabled] .checkbox-inline{cursor:not-allowed}.radio.disabled label,.checkbox.disabled label,fieldset[disabled] .radio label,fieldset[disabled] .checkbox label{cursor:not-allowed}.form-control-static{padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm,.form-horizontal .form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}textarea.input-sm,select[multiple].input-sm{height:auto}.input-lg,.form-horizontal .form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-lg{height:46px;line-height:46px}textarea.input-lg,select[multiple].input-lg{height:auto}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:25px;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center}.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .help-block,.has-success .control-label,.has-success .radio,.has-success .checkbox,.has-success .radio-inline,.has-success .checkbox-inline{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px 
rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .help-block,.has-warning .control-label,.has-warning .radio,.has-warning .checkbox,.has-warning .radio-inline,.has-warning .checkbox-inline{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .help-block,.has-error .control-label,.has-error .radio,.has-error .checkbox,.has-error .radio-inline,.has-error .checkbox-inline{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn,.form-inline .input-group .form-control{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline .control-label{margin-bottom:0;vertical-align:middle}.form-inline .radio,.form-inline .checkbox{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .radio label,.form-inline .checkbox label{padding-left:0}.form-inline .radio input[type=radio],.form-inline .checkbox input[type=checkbox]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .radio,.form-horizontal .checkbox,.form-horizontal .radio-inline,.form-horizontal .checkbox-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .radio,.form-horizontal .checkbox{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{top:0;right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:14.3px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn:focus,.btn:active:focus,.btn.active:focus{outline:thin 
dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn:hover,.btn:focus{color:#333;text-decoration:none}.btn:active,.btn.active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{pointer-events:none;cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default:hover,.btn-default:focus,.btn-default:active,.btn-default.active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default:active,.btn-default.active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default[disabled],fieldset[disabled] .btn-default,.btn-default.disabled:hover,.btn-default[disabled]:hover,fieldset[disabled] .btn-default:hover,.btn-default.disabled:focus,.btn-default[disabled]:focus,fieldset[disabled] .btn-default:focus,.btn-default.disabled:active,.btn-default[disabled]:active,fieldset[disabled] .btn-default:active,.btn-default.disabled.active,.btn-default[disabled].active,fieldset[disabled] .btn-default.active{background-color:#fff;border-color:#ccc}.btn-default .badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#428bca;border-color:#357ebd}.btn-primary:hover,.btn-primary:focus,.btn-primary:active,.btn-primary.active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#3071a9;border-color:#285e8e}.btn-primary:active,.btn-primary.active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary[disabled],fieldset[disabled] .btn-primary,.btn-primary.disabled:hover,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary:hover,.btn-primary.disabled:focus,.btn-primary[disabled]:focus,fieldset[disabled] .btn-primary:focus,.btn-primary.disabled:active,.btn-primary[disabled]:active,fieldset[disabled] .btn-primary:active,.btn-primary.disabled.active,.btn-primary[disabled].active,fieldset[disabled] .btn-primary.active{background-color:#428bca;border-color:#357ebd}.btn-primary .badge{color:#428bca;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success:hover,.btn-success:focus,.btn-success:active,.btn-success.active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;border-color:#398439}.btn-success:active,.btn-success.active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success[disabled],fieldset[disabled] .btn-success,.btn-success.disabled:hover,.btn-success[disabled]:hover,fieldset[disabled] .btn-success:hover,.btn-success.disabled:focus,.btn-success[disabled]:focus,fieldset[disabled] .btn-success:focus,.btn-success.disabled:active,.btn-success[disabled]:active,fieldset[disabled] .btn-success:active,.btn-success.disabled.active,.btn-success[disabled].active,fieldset[disabled] .btn-success.active{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info:hover,.btn-info:focus,.btn-info:active,.btn-info.active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info:active,.btn-info.active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info[disabled],fieldset[disabled] 
.btn-info,.btn-info.disabled:hover,.btn-info[disabled]:hover,fieldset[disabled] .btn-info:hover,.btn-info.disabled:focus,.btn-info[disabled]:focus,fieldset[disabled] .btn-info:focus,.btn-info.disabled:active,.btn-info[disabled]:active,fieldset[disabled] .btn-info:active,.btn-info.disabled.active,.btn-info[disabled].active,fieldset[disabled] .btn-info.active{background-color:#5bc0de;border-color:#46b8da}.btn-info .badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning:hover,.btn-warning:focus,.btn-warning:active,.btn-warning.active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning:active,.btn-warning.active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-warning,.btn-warning.disabled:hover,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning:hover,.btn-warning.disabled:focus,.btn-warning[disabled]:focus,fieldset[disabled] .btn-warning:focus,.btn-warning.disabled:active,.btn-warning[disabled]:active,fieldset[disabled] .btn-warning:active,.btn-warning.disabled.active,.btn-warning[disabled].active,fieldset[disabled] .btn-warning.active{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger:hover,.btn-danger:focus,.btn-danger:active,.btn-danger.active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger:active,.btn-danger.active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger[disabled],fieldset[disabled] .btn-danger,.btn-danger.disabled:hover,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger:hover,.btn-danger.disabled:focus,.btn-danger[disabled]:focus,fieldset[disabled] .btn-danger:focus,.btn-danger.disabled:active,.btn-danger[disabled]:active,fieldset[disabled] .btn-danger:active,.btn-danger.disabled.active,.btn-danger[disabled].active,fieldset[disabled] .btn-danger.active{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#428bca;cursor:pointer;border-radius:0}.btn-link,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:hover,.btn-link:focus,.btn-link:active{border-color:transparent}.btn-link:hover,.btn-link:focus{color:#2a6496;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover,fieldset[disabled] .btn-link:hover,.btn-link[disabled]:focus,fieldset[disabled] .btn-link:focus{color:#777;text-decoration:none}.btn-lg,.btn-group-lg>.btn{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-sm,.btn-group-sm>.btn{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-xs,.btn-group-xs>.btn{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=submit].btn-block,input[type=reset].btn-block,input[type=button].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s 
linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;-o-transition:height .35s ease;transition:height .35s ease}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px solid;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{color:#fff;text-decoration:none;background-color:#428bca;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{color:#777}.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px solid}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;float:left}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover,.btn-group>.btn:focus,.btn-group-vertical>.btn:focus,.btn-group>.btn:active,.btn-group-vertical>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn.active{z-index:2}.btn-group>.btn:focus,.btn-group-vertical>.btn:focus{outline:0}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn-group,.btn-toolbar 
.input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child>.btn:last-child,.btn-group>.btn-group:first-child>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn>input[type=radio],[data-toggle=buttons]>.btn>input[type=checkbox]{position:absolute;z-index:-1;filter:alpha(opacity=0);opacity:0}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 
16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn,select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn,select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn{height:auto}.input-group-addon,.input-group-btn,.input-group .form-control{display:table-cell}.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child),.input-group .form-control:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=radio],.input-group-addon input[type=checkbox]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group-btn:last-child>.btn-group:not(:last-child)>.btn{border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:first-child>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:hover,.input-group-btn>.btn:focus,.input-group-btn>.btn:active{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:hover,.nav>li>a:focus{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:hover,.nav>li.disabled>a:focus{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav 
.open>a:hover,.nav .open>a:focus{background-color:#eee;border-color:#428bca}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:hover,.nav-tabs>li.active>a:focus{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:hover,.nav-pills>li.active>a:focus{color:#fff;background-color:#428bca}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media (min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{max-height:340px}@media 
(max-width:480px) and (orientation:landscape){.navbar-fixed-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{max-height:200px}}.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;z-index:1030;-webkit-transform:translate3d(0,0,0);-o-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}@media (min-width:768px){.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu>li>a,.navbar-nav .open .dropdown-menu .dropdown-header{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:hover,.navbar-nav .open .dropdown-menu>li>a:focus{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}.navbar-nav.navbar-right:last-child{margin-right:-15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .input-group-addon,.navbar-form .input-group .input-group-btn,.navbar-form .input-group .form-control{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .radio,.navbar-form .checkbox{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .radio label,.navbar-form .checkbox label{padding-left:0}.navbar-form .radio 
input[type=radio],.navbar-form .checkbox input[type=checkbox]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-form.navbar-right:last-child{margin-right:-15px}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}.navbar-text.navbar-right:last-child{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:hover,.navbar-default .navbar-brand:focus{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:hover,.navbar-default .navbar-nav>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:hover,.navbar-default .navbar-nav>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:hover,.navbar-default .navbar-nav>.disabled>a:focus{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:hover,.navbar-default .navbar-toggle:focus{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:hover,.navbar-default .navbar-nav>.open>a:focus{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default .btn-link:hover,.navbar-default .btn-link:focus{color:#333}.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:hover,.navbar-default .btn-link[disabled]:focus,fieldset[disabled] .navbar-default .btn-link:focus{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#777}.navbar-inverse .navbar-brand:hover,.navbar-inverse .navbar-brand:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#777}.navbar-inverse 
.navbar-nav>li>a{color:#777}.navbar-inverse .navbar-nav>li>a:hover,.navbar-inverse .navbar-nav>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:hover,.navbar-inverse .navbar-nav>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:hover,.navbar-inverse .navbar-nav>.disabled>a:focus{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:hover,.navbar-inverse .navbar-toggle:focus{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:hover,.navbar-inverse .navbar-nav>.open>a:focus{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#777}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#777}.navbar-inverse .btn-link:hover,.navbar-inverse .btn-link:focus{color:#fff}.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:hover,.navbar-inverse .btn-link[disabled]:focus,fieldset[disabled] .navbar-inverse .btn-link:focus{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#428bca;text-decoration:none;background-color:#fff;border:1px solid 
#ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:hover,.pagination>li>span:hover,.pagination>li>a:focus,.pagination>li>span:focus{color:#2a6496;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>span,.pagination>.active>a:hover,.pagination>.active>span:hover,.pagination>.active>a:focus,.pagination>.active>span:focus{z-index:2;color:#fff;cursor:default;background-color:#428bca;border-color:#428bca}.pagination>.disabled>span,.pagination>.disabled>span:hover,.pagination>.disabled>span:focus,.pagination>.disabled>a,.pagination>.disabled>a:hover,.pagination>.disabled>a:focus{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:hover,.pager li>a:focus{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:hover,.pager .disabled>a:focus,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:hover,a.label:focus{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:hover,.label-default[href]:focus{background-color:#5e5e5e}.label-primary{background-color:#428bca}.label-primary[href]:hover,.label-primary[href]:focus{background-color:#3071a9}.label-success{background-color:#5cb85c}.label-success[href]:hover,.label-success[href]:focus{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:hover,.label-info[href]:focus{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:hover,.label-warning[href]:focus{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:hover,.label-danger[href]:focus{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-xs .badge{top:0;padding:1px 
5px}a.badge:hover,a.badge:focus{color:#fff;text-decoration:none;cursor:pointer}a.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#428bca;background-color:#fff}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron h1,.jumbotron .h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron{border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron{padding-right:60px;padding-left:60px}.jumbotron h1,.jumbotron .h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.thumbnail>img,.thumbnail a>img{margin-right:auto;margin-left:auto}a.thumbnail:hover,a.thumbnail:focus,a.thumbnail.active{border-color:#428bca}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#428bca;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-striped .progress-bar,.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress.active .progress-bar,.progress-bar.active{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar[aria-valuenow="1"],.progress-bar[aria-valuenow="2"]{min-width:30px}.progress-bar[aria-valuenow="0"]{min-width:30px;color:#777;background-color:transparent;background-image:none;-webkit-box-shadow:none;box-shadow:none}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.media,.media-body{overflow:hidden;zoom:1}.media,.media .media{margin-top:15px}.media:first-child{margin-top:0}.media-object{display:block}.media-heading{margin:0 0 5px}.media>.pull-left{margin-right:10px}.media>.pull-right{margin-left:10px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid 
#ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}a.list-group-item{color:#555}a.list-group-item .list-group-item-heading{color:#333}a.list-group-item:hover,a.list-group-item:focus{color:#555;text-decoration:none;background-color:#f5f5f5}.list-group-item.disabled,.list-group-item.disabled:hover,.list-group-item.disabled:focus{color:#777;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{z-index:2;color:#fff;background-color:#428bca;border-color:#428bca}.list-group-item.active .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>.small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:hover .list-group-item-text,.list-group-item.active:focus .list-group-item-text{color:#e1edf7}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:hover,a.list-group-item-success:focus{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:hover,a.list-group-item-success.active:focus{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading{color:inherit}a.list-group-item-info:hover,a.list-group-item-info:focus{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:hover,a.list-group-item-info.active:focus{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:hover,a.list-group-item-warning:focus{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:hover,a.list-group-item-warning.active:focus{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger{color:#a94442}a.list-group-item-danger 
.list-group-item-heading{color:inherit}a.list-group-item-danger:hover,a.list-group-item-danger:focus{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:hover,a.list-group-item-danger.active:focus{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group{margin-bottom:0}.panel>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.table,.panel>.table-responsive>.table,.panel>.panel-collapse>.table{margin-bottom:0}.panel>.table:first-child,.panel>.table-responsive:first-child>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table:last-child,.panel>.table-responsive:last-child>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child 
td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child{border-bottom-left-radius:3px}.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child th,.panel>.table>tbody:first-child>tr:first-child td{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}
.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#428bca}.panel-primary>.panel-heading{color:#fff;background-color:#428bca;border-color:#428bca}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#428bca}.panel-primary>.panel-heading .badge{color:#428bca;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#428bca}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive iframe,.embed-responsive embed,.embed-responsive object{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well
blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate3d(0,-25%,0);-o-transform:translate3d(0,-25%,0);transform:translate3d(0,-25%,0)}.modal.in .modal-dialog{-webkit-transform:translate3d(0,0,0);-o-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{min-height:16.43px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-size:12px;line-height:1.4;visibility:visible;filter:alpha(opacity=0);opacity:0}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{bottom:0;left:5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{right:5px;bottom:0;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom 
.tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;left:5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;right:5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;text-align:left;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2)}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:400;line-height:18px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', 
endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:hover,.carousel-control:focus{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right{position:absolute;top:50%;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .glyphicon-chevron-left{left:50%;margin-left:-10px}.carousel-control .icon-next,.carousel-control .glyphicon-chevron-right{right:50%;margin-right:-10px}.carousel-control .icon-prev,.carousel-control .icon-next{width:20px;height:20px;margin-top:-10px;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000 \9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators .active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-15px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-15px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after,.dl-horizontal dd:before,.dl-horizontal dd:after,.container:before,.container:after,.container-fluid:before,.container-fluid:after,.row:before,.row:after,.form-horizontal .form-group:before,.form-horizontal .form-group:after,.btn-toolbar:before,.btn-toolbar:after,.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after,.nav:before,.nav:after,.navbar:before,.navbar:after,.navbar-header:before,.navbar-header:after,.navbar-collapse:before,.navbar-collapse:after,.pager:before,.pager:after,.panel-body:before,.panel-body:after,.modal-footer:before,.modal-footer:after{display:table;content:" "}.clearfix:after,.dl-horizontal dd:after,.container:after,.container-fluid:after,.row:after,.form-horizontal 
.form-group:after,.btn-toolbar:after,.btn-group-vertical>.btn-group:after,.nav:after,.navbar:after,.navbar-header:after,.navbar-collapse:after,.pager:after,.panel-body:after,.modal-footer:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important;visibility:hidden!important}.affix{position:fixed;-webkit-transform:translate3d(0,0,0);-o-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}@-ms-viewport{width:device-width}.visible-xs,.visible-sm,.visible-md,.visible-lg{display:none!important}.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table}tr.visible-xs{display:table-row!important}th.visible-xs,td.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table}tr.visible-sm{display:table-row!important}th.visible-sm,td.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table}tr.visible-md{display:table-row!important}th.visible-md,td.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table}tr.visible-lg{display:table-row!important}th.visible-lg,td.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table}tr.visible-print{display:table-row!important}th.visible-print,td.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media 
print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} \ No newline at end of file diff --git a/robot_painting/qmupd_vs/docs/assets/font.css b/robot_painting/qmupd_vs/docs/assets/font.css new file mode 100644 index 0000000000000000000000000000000000000000..7e660aea47882b0d1a21d474ed59e505734aa49e --- /dev/null +++ b/robot_painting/qmupd_vs/docs/assets/font.css @@ -0,0 +1,37 @@ +/* Homepage Font */ + +/* latin-ext */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 400; + src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v16/S6uyw4BMUTPHjxAwXjeu.woff2) format('woff2'); + unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; +} + +/* latin */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 400; + src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v16/S6uyw4BMUTPHjx4wXg.woff2) format('woff2'); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; +} + +/* latin-ext */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 700; + src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v16/S6u9w4BMUTPHh6UVSwaPGR_p.woff2) format('woff2'); + unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; +} + +/* latin */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 700; + src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v16/S6u9w4BMUTPHh6UVSwiPGQ.woff2) format('woff2'); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; +} diff --git a/robot_painting/qmupd_vs/docs/assets/style.css b/robot_painting/qmupd_vs/docs/assets/style.css new file mode 100644 index 0000000000000000000000000000000000000000..63a6300e67633b4352d45234837544d0f5d71f01 --- /dev/null +++ b/robot_painting/qmupd_vs/docs/assets/style.css @@ -0,0 +1,135 @@ +/* Body */ +body { + background: #e3e5e8; + color: #ffffff; + font-family: 'Lato', Verdana, Helvetica, sans-serif; + font-weight: 300; + font-size: 14pt; +} + +/* Hyperlinks */ +a {text-decoration: none;} +a:link {color: #1772d0;} +a:visited {color: #1772d0;} +a:active {color: red;} +a:hover {color: #f09228;} + +/* Pre-formatted Text */ +pre { + margin: 5pt 0; + border: 0; + font-size: 12pt; + background: #fcfcfc; +} + +/* Project Page Style */ +/* Section */ +.section { + width: 768pt; + min-height: 100pt; + margin: 15pt auto; + padding: 20pt 30pt; + border: 1pt hidden #000; + text-align: justify; + color: #000000; + background: #ffffff; +} + +/* Header (Title and Logo) */ +.section .header { + min-height: 80pt; + margin-top: 30pt; +} +.section .header .logo { + width: 80pt; + margin-left: 10pt; + float: left; +} +.section .header .logo img { + width: 80pt; + object-fit: cover; +} +.section .header .title { + margin: 0 120pt; + text-align: center; + font-size: 22pt; +} + +/* Author */ +.section .author { + margin: 5pt 0; + 
text-align: center; + font-size: 16pt; +} + +/* Institution */ +.section .institution { + margin: 5pt 0; + text-align: center; + font-size: 16pt; +} + +/* Hyperlink (such as Paper and Code) */ +.section .link { + margin: 5pt 0; + text-align: center; + font-size: 16pt; +} + +/* Teaser */ +.section .teaser { + margin: 20pt 0; + text-align: left; +} +.section .teaser img { + width: 95%; +} + +/* Section Title */ +.section .title { + text-align: center; + font-size: 22pt; + margin: 5pt 0 15pt 0; /* top right bottom left */ +} + +/* Section Body */ +.section .body { + margin-bottom: 15pt; + text-align: justify; + font-size: 14pt; +} + +/* BibTeX */ +.section .bibtex { + margin: 5pt 0; + text-align: left; + font-size: 22pt; +} + +/* Related Work */ +.section .ref { + margin: 20pt 0 10pt 0; /* top right bottom left */ + text-align: left; + font-size: 18pt; + font-weight: bold; +} + +/* Citation */ +.section .citation { + min-height: 60pt; + margin: 10pt 0; +} +.section .citation .image { + width: 120pt; + float: left; +} +.section .citation .image img { + max-height: 60pt; + width: 120pt; + object-fit: cover; +} +.section .citation .comment{ + margin-left: 0pt; + text-align: left; + font-size: 14pt; +} diff --git a/robot_painting/qmupd_vs/docs/figures/face-blue-1390-simplest.gif b/robot_painting/qmupd_vs/docs/figures/face-blue-1390-simplest.gif new file mode 100644 index 0000000000000000000000000000000000000000..9e39008d571e9481bd5b4e294861e5c094073755 Binary files /dev/null and b/robot_painting/qmupd_vs/docs/figures/face-blue-1390-simplest.gif differ diff --git a/robot_painting/qmupd_vs/docs/figures/muten-black-full-simplest.gif b/robot_painting/qmupd_vs/docs/figures/muten-black-full-simplest.gif new file mode 100644 index 0000000000000000000000000000000000000000..d51606ac41eeebac58ea0b8e04b866ead37bd64e Binary files /dev/null and b/robot_painting/qmupd_vs/docs/figures/muten-black-full-simplest.gif differ diff --git a/robot_painting/qmupd_vs/docs/figures/rocket-blue-simplest.gif b/robot_painting/qmupd_vs/docs/figures/rocket-blue-simplest.gif new file mode 100644 index 0000000000000000000000000000000000000000..d15f78a4440bb802beabf342f11d9edc5a055765 Binary files /dev/null and b/robot_painting/qmupd_vs/docs/figures/rocket-blue-simplest.gif differ diff --git a/robot_painting/qmupd_vs/docs/index.html b/robot_painting/qmupd_vs/docs/index.html new file mode 100644 index 0000000000000000000000000000000000000000..c4ab8fd63da2547aba35cd890d897c6d4e328052 --- /dev/null +++ b/robot_painting/qmupd_vs/docs/index.html @@ -0,0 +1,293 @@ + + + + + + + + + General Virtual Sketching Framework for Vector Line Art + + + + + + + + + + + + +
+ +
+ General Virtual Sketching Framework for Vector Line Art +
+ + +
+ 1Sun Yat-sen University,  + 2Waseda University,  +
+ 3Huawei Technologies Canada +
+
+
+ Accepted to ACM SIGGRAPH 2021 +
+ +
+ +
+
+ + Given clean line drawings, rough sketches or photographs of arbitrary resolution as input, our framework directly generates the corresponding vector line drawings. As shown in (b), the framework models a virtual pen surrounded by a dynamic window (red boxes) that moves while drawing the strokes. It learns to move around by scaling the window and by sliding to an undrawn area to restart the drawing (bottom example; sliding trajectory shown by the blue arrow). With our proposed stroke regularization mechanism, the framework can enlarge the window and draw long strokes for simplicity (top example). + 
+
+ + + + +
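+
+ The dynamic-window behaviour above amounts to cropping a pen-centred patch out of the canvas at every step. The snippet below is a minimal NumPy sketch of such a crop with border clamping; `crop_window` and its arguments are illustrative names, not the repository's API, and the sketch assumes the window fits inside the canvas.
+ ```python
+ import numpy as np
+
+ def crop_window(canvas, cursor_xy, window_size):
+     """Crop a window_size x window_size patch centred on the virtual pen."""
+     h, w = canvas.shape[:2]
+     x, y = int(round(cursor_xy[0])), int(round(cursor_xy[1]))
+     # Clamp so the patch stays inside the canvas (assumes window_size <= h, w).
+     x0 = max(0, min(x - window_size // 2, w - window_size))
+     y0 = max(0, min(y - window_size // 2, h - window_size))
+     return canvas[y0:y0 + window_size, x0:x0 + window_size]
+ ```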
+
Abstract
+
+ Vector line art plays an important role in graphic design; however, it is tedious to create manually. + We introduce a general framework to produce line drawings from a wide variety of images, + by learning a mapping from raster image space to vector image space. + Our approach is based on a recurrent neural network that draws the lines one by one. + A differentiable rasterization module allows for training with only supervised raster data. + We use a dynamic window around a virtual pen while drawing lines, + implemented with proposed aligned cropping and differentiable pasting modules. + Furthermore, we develop a stroke regularization loss + that encourages the model to use fewer and longer strokes to simplify the resulting vector image. + Ablation studies and comparisons with existing methods corroborate the efficiency of our approach, + which generates visually better results in less computation time + while generalizing better to a diversity of images and applications.
+ +
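+
+ The stroke regularization loss is exposed in this repository only through hyper-parameters (stroke_num_loss_weight and sn_loss_type in hyper_parameters.py), so the following is a plausible sketch of the idea rather than the exact training loss: penalize the expected number of pen-down steps so the model prefers fewer, longer strokes.
+ ```python
+ import numpy as np
+
+ def stroke_num_penalty(pen_down_probs, weight=0.02):
+     """Toy stroke-count regularizer: weight times the expected number of
+     drawn strokes. The real loss is annealed over training (sn_loss_type
+     'increasing'/'decreasing'); this sketch omits the schedule."""
+     return weight * float(np.sum(pen_down_probs))
+ ```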
+ + + + +
+
Method
+
+
+

Framework Overview

+ +
+
+ + Our framework generates the parametrized strokes step by step in a recurrent manner. + It uses a dynamic window (dashed red boxes) around a virtual pen to draw the strokes, + and can both move the window and change its size. + (a) The four main modules at each time step: aligned cropping, stroke generation, differentiable rendering and differentiable pasting. + (b) Architecture of the stroke generation module. + (c) Structural strokes predicted at each step; + movement-only steps, during which no stroke is drawn on the canvas, are indicated by blue arrows. + 
+
+ +
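+
+ As a control-flow sketch, the four modules compose into one recurrent step roughly as follows; crop, generate_stroke, render and paste are stand-ins for the aligned-cropping, stroke-generation, differentiable-rendering and differentiable-pasting modules, so treat this as illustrative pseudocode rather than the repository's implementation.
+ ```python
+ def drawing_step(canvas, state, cursor, window_size,
+                  crop, generate_stroke, render, paste):
+     patch = crop(canvas, cursor, window_size)         # aligned cropping
+     stroke, state = generate_stroke(patch, state)     # stroke parameters + pen state
+     stroke_patch = render(stroke)                     # differentiable rendering
+     canvas = paste(canvas, stroke_patch, cursor, window_size)  # differentiable pasting
+     return canvas, state, stroke.next_cursor, stroke.next_window_size
+ ```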

+ Overall Introduction +

+

+ (Or watch on Bilibili) +
+ 👇 +

+ +

+ +
+ +
+
+ + + +
+
Results
+
+ Our framework is applicable to a wide variety of image types, such as clean line drawings, rough sketches and photographs. + 

Vectorization

+ + + + + + +
+
+ +

Rough sketch simplification

+ + + + + + + + +
+
+ +

Photograph to line drawing

+ + + + + + + + +
+
+ +

+ More Results +

+

+ (Or watch on Bilibili) +
+ 👇 +

+ +

+ +
+
+ + + +
+
+ + + + +
+
Presentations
+
+ +

+ 3-5 minute presentation +

+

+ (Or watch on Bilibili) +
+ 👇 +

+ +

+ +
+
+ + + + + +
+
+ + + + +
+
BibTeX
+
+@article{mo2021virtualsketching,
+    title   = {General Virtual Sketching Framework for Vector Line Art},
+    author  = {Mo, Haoran and Simo-Serra, Edgar and Gao, Chengying and Zou, Changqing and Wang, Ruomei},
+    journal = {ACM Transactions on Graphics (Proceedings of ACM SIGGRAPH 2021)},
+    year    = {2021},
+    volume  = {40},
+    number  = {4},
+    pages   = {51:1--51:14}
+}
+
+ +
+
Related Work
+
+
+ Jean-Dominique Favreau, Florent Lafarge and Adrien Bousseau. + Fidelity vs. Simplicity: a Global Approach to Line Drawing Vectorization. SIGGRAPH 2016. + [Paper] + [Webpage] +

+
+ +
+ Mikhail Bessmeltsev and Justin Solomon. + Vectorization of Line Drawings via PolyVector Fields. SIGGRAPH 2019. + [Paper] + [Code] +

+
+ +
+ Edgar Simo-Serra, Satoshi Iizuka and Hiroshi Ishikawa. + Mastering Sketching: Adversarial Augmentation for Structured Prediction. SIGGRAPH 2018. + [Paper] + [Webpage] + [Code] +

+
+ +
+ Zhewei Huang, Wen Heng and Shuchang Zhou. + Learning to Paint With Model-based Deep Reinforcement Learning. ICCV 2019. + [Paper] + [Code] +

+
+
+
+ + + + + diff --git a/robot_painting/qmupd_vs/draw_tools.py b/robot_painting/qmupd_vs/draw_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..fd699d1fb3ac09125fe060ae8565fb903e2837e5 --- /dev/null +++ b/robot_painting/qmupd_vs/draw_tools.py @@ -0,0 +1,657 @@ +import os +import cv2 +from matplotlib import pyplot as plt +import numpy as np +from IPython.display import clear_output +from scipy.interpolate import splprep, splev +import shutil +import glob +import time +import sys +import numpy as np +from PIL import Image +import tensorflow as tf +import cv2 +from utils import get_colors, draw, image_pasting_v3_testing +from model_common_test import DiffPastingV3 +import random +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + +def fix_edge_contour(contour, im_shape): + """ + 有时候生成的轮廓点会有一些头部或者尾部紧挨着图像边沿的情况,这样的点位是不需要的,需要过滤掉。 + 如果轮廓点头部或者尾部紧挨着图像边沿,过滤裁掉该部分的点位 + """ + # 将轮廓转换为列表 + contour = contour.tolist() + + # 检查轮廓的头部点 + while True: + x, y = contour[0][0] + if x == 0 or y == 0 or x == (im_shape[1] - 1) or y == (im_shape[0] - 1): + del contour[0] + else: + break + + # 检查轮廓的尾部点 + while True: + x, y = contour[-1][0] + if x == 0 or y == 0 or x == (im_shape[1] - 1) or y == (im_shape[0] - 1): + del contour[-1] + else: + break + + # 将轮廓转换回numpy数组 + contour = np.array(contour) + return contour + +def getContourList(image, pen_width: int = 3, min_contour_len: int = 30, is_show: bool = False): + """ + 从图像中获取轮廓列表 + :param image: 图像 + :param pen_width: 笔的粗细 + :param min_contour_len: 最短的轮廓长度 + :param is_show: 是否显示图像 + :return: 轮廓列表 + """ + # 读取图片 + # im = cv2.imread("../data/1_fake.png",cv2.IMREAD_GRAYSCALE) + if image is None: + print("Can't read the image file.") + return + elif len(image.shape) == 3: + image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + elif len(image.shape) == 4: + image = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY) + # 转换二值化 + image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)[1] + + # 获取图像线条的绘制顺序,以方便于机器人连续运动绘制图像 + # Create a copy of the original image to draw contours on + image_copy = image.copy() + + image_with_contours = np.full_like(image_copy, 255) + + # Initialize a list to store the contours + contour_list = [] + + directions = [(0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (1, -1), (-1, 1), (-1, -1)] + sec0 = (0, image_copy.shape[0]) + sec1 = (sec0[1]-1, sec0[1]+image_copy.shape[1]-1) + sec2 = (sec1[1]-1, sec1[1]+image_copy.shape[0]-1) + sec3 = (sec2[1]-1, sec2[1]+image_copy.shape[1]-2) + while True: + # Find contours in the image + # 并且找到的轮廓都在黑色的像素上 + _, contours, _ = cv2.findContours(image_copy, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) + + # If no contours are found, break the loop + # 没有轮廓需要中止;当图像是全白时,可以检测到一个轮廓,也需要终止 + if len(contours) == 0 or (len(contours)==1 and np.all(image_copy == 255)): + break + + # Remove the border contour + # contours = [cnt for cnt in contours if not np.any(cnt == 0) and not np.any(cnt == height-1) and not np.any(cnt == width-1)] + # `cv2.findContours`函数在找到轮廓时,实际上是在找到黑色对象(前景)和白色背景之间的边界 + # 这意味着轮廓的坐标可能不会精确地落在原始图像的黑色像素上,而是在黑色和白色像素之间。 + # 如果你希望轮廓精确地落在黑色像素上,需要对`cv2.findContours`的结果进行一些后处理。例如,遍历轮廓的每个点,然后将它们的坐标向最近的黑色像素进行取整。 + # 避免后续在擦除时,并没有擦除原有图像的黑色像素 + print(f"pen width: {pen_width}") + if pen_width == 1: + for contour in contours: + for point in contour: + x, y = point[0] + if image_copy[y, x] == 255: + for dx, dy in directions: + nx, ny = x + dx, y + dy + if nx >= 0 and ny >= 0 and nx < image_copy.shape[1] and ny < image_copy.shape[0]: + if image_copy[ny, nx] == 0: + point[0][0] = nx + point[0][1] = ny + break + + 
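+        # Each pass of this loop peels one "layer" of contours off the image:
+        # the contours found above are recorded and drawn to the preview canvas,
+        # then erased from image_copy with the pen width, so the next
+        # cv2.findContours pass can reach strokes hidden underneath.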
cv2.drawContours(image_with_contours, contours, -1, 0, 1) + # erase the exist contours + cv2.drawContours(image_copy, contours, -1, 255, pen_width) + # add contours to list + # Sort the elements in contours according to the length of the elements. + # The longest contour is at the front, which is convenient for subsequent drawing and can be drawn first. + + # remove the contour when the contour is the box of image + contours = list(contours) + max_len = 0 + for i in reversed(range(len(contours))): + # 太短的也不要 + if len(contours[i]) < min_contour_len: + contours.pop(i) + continue + # 将画四个边框的轮廓去掉 + if (len(contours[i]) >= ( image_with_contours.shape[0]*2 + image_with_contours.shape[0]*2 - 4) and \ + (contours[i][sec0[0]:sec0[1], :, 0] == 0).all() and \ + (contours[i][sec1[0]:sec1[1], :, 1] == image_with_contours.shape[0]-1).all() and \ + (contours[i][sec2[0]:sec2[1], :, 0] == image_with_contours.shape[1]-1).all() and \ + (contours[i][sec3[0]:sec3[1], :, 1] == 0).all()): + contours.pop(i) + continue + contours.sort(key=lambda x: x.shape[0], reverse=True) + contour_list.extend(contours) + if is_show: + # show the image with the drawn contours + # Clear the previous plot + clear_output(wait=True) + + plt.subplot(1,3,1) + plt.imshow(image, cmap='gray', vmin=0, vmax=255) + + plt.subplot(1,3,2) + plt.imshow(image_copy, cmap='gray', vmin=0, vmax=255) + + plt.subplot(1,3,3) + # Show the image with the current contour + plt.imshow(image_with_contours, cmap='gray', vmin=0, vmax=255) + plt.show() + for i in reversed(range(len(contour_list))): + contour = contour_list[i] + contour = fix_edge_contour(contour=contour, im_shape=image.shape) + if len(contour) < min_contour_len: + contour_list.pop(i) + return contour_list + +def sortContoursList(contour_list): + """ + 根据以下规则排序: + 1. 先从最长的1/3个轮廓中,挑选出最长的一些轮廓(大致1/5的轮廓) + 2. 以上一个轮廓的终点为准,找到剩下轮廓中,起点与该点位最近的距离排序 + """ + contour_list.sort(key=lambda x: x.shape[0], reverse=True) + # 数量太少,直接返回排序后的轮廓列表,不需要太多策略 + if len(contour_list) <= 10: + return contour_list + origin_count = len(contour_list) + # 1. 先从最长的1/3个轮廓中,随机选出一些轮廓(大致1/2的轮廓), + # 这样画尝的轮廓容易先画出来图像的大体轮廓。另外,随机一下,是为了避免每次都是画同样或者相似的轮廓 + tmp_contour_list = contour_list[:int(len(contour_list)/3)] + np.random.shuffle(tmp_contour_list) + tmp_contour_list = tmp_contour_list[:int(len(tmp_contour_list)/2)] + for contour in tmp_contour_list: + for i in reversed(range(len(contour_list))): + if contour_list[i] is contour: + contour_list.pop(i) + break + ret_contour_list = tmp_contour_list + # 2. 
以上一个轮廓的终点为准,找到剩下轮廓中,起点与该点位最近的距离排序 + count = len(tmp_contour_list) + while (count < origin_count): + # 找到最后一个轮廓的终点 + last_contour = ret_contour_list[-1] + last_point = last_contour[-1][0] + # 找到剩下轮廓中,起点与该点位最近的距离排序 + min_index = -1 + min_distance = 999999999 + for i in range(len(contour_list)): + # print(contour_list[i].shape) + first_point = contour_list[i][0][0] + distance = (first_point[0] - last_point[0])**2 + (first_point[1] - last_point[1])**2 + if distance < min_distance: + min_distance = distance + min_index = i + ret_contour_list.append(contour_list[min_index]) + contour_list.pop(min_index) + count += 1 + return ret_contour_list + +def remove_overlap_and_near_contours(contours_list, image_size, extend_pixel , near_threshold=0.5, min_contour_length=10): + """ + 移除重叠及过近的轮廓 + :param contours_list: 轮廓列表 + :param image_size: 图像大小 + :param extend_pixel: 扩展像素 + :param near_threshold: 过近阈值 + """ + # 思路:模拟画图,如果后面的轮廓与前面的轮廓重叠或者过近,那么就不画 + circle_lookup = np.zeros((extend_pixel*2+1, extend_pixel*2+1), dtype=np.bool_) + for i in range(-extend_pixel, extend_pixel+1): + for j in range(-extend_pixel, extend_pixel+1): + if (i**2 + j**2) <= extend_pixel**2: + circle_lookup[i, j] = True + map = np.zeros((image_size[0], image_size[1]), dtype=np.bool_) + new_contours_list = [] + for contour in contours_list: + # 太短的轨迹不画 + if len(contour) < min_contour_length: + continue + # 画图 + contour_length = len(contour) + overlap_length = 0 + for point in contour: + x, y = int(point[0][0]),int(point[0][1]) + # 统计重叠度 + if (map[x, y] == True): + overlap_length += 1 + # 与原来重叠度比较高,则去掉,这条轨迹不画了。 + if overlap_length / contour_length >= near_threshold: + continue + else: + # 去掉长度为0的轮廓 + if (len(contour) > 0): + new_contours_list.append(np.array(contour)) + else: + print("==========contour length is 0, in position 3") + # new_contours_list.append(np.array(contour)) + # 把当前轨迹经过的像素都在map中进行标记,以便于后续查询需要 + for point in contour: + x, y = int(point[0][0]),int(point[0][1]) + for i in range(-extend_pixel, extend_pixel+1): + for j in range(-extend_pixel, extend_pixel+1): + if circle_lookup[i, j]: + if x+i >= 0 and x+i < image_size[0] and y+j >= 0 and y+j < image_size[1]: + map[x+i, y+j] = True + return new_contours_list + + +def sample_and_smooth_contours(contour_list, interval: int = 5): + """ + 采样并平滑拟合轮廓 + :param contour_list: 轮廓列表 + :param interval: 采样间隔 + :return: 平滑拟合并采样后的轮廓列表。注意为浮点的数组 + """ + f_contour_list = [] + for contour in contour_list: + # 对contour中的点进行B样条进行拟合,然后平滑和重采样, + # Fit a B-spline to the contour + if (contour[0] == contour[-1]).all(): + contour = contour.reshape(-1, 2) + tck, u = splprep(contour.T, w=None, u=None, ue=None, k=3, task=0, s=1.0, t=None, full_output=0, nest=None, per=1, quiet=1) + else: + contour = contour.reshape(-1, 2) + tck, u = splprep(contour.T, w=None, u=None, ue=None, k=3, task=0, s=1.0, t=None, full_output=0, nest=None, per=0, quiet=1) + # 设置重采样的点数 + num = contour.shape[0] // interval + u_new = np.linspace(u.min(), u.max(), num) + x_new, y_new = splev(u_new, tck, der=0) + f_contour = np.array([x_new, y_new]).T.reshape(-1, 1, 2) + f_contour_list.append(f_contour) + return f_contour_list + + +def save_contour_points(contour_list, filepath): + """ + 保存轮廓点到文件中,每个轮廓占一行,x和y坐标用逗号分割,点之间用逗号分割 + Usage: + save_contour_points(f_contour_list, "../data/1_fake_data.txt") + """ + dirname = os.path.dirname(filepath) + if (not os.path.exists(dirname)): + os.makedirs(dirname) + with open(filepath, "w") as f: + for contour in contour_list: + for point in contour: + x, y = point[0] + f.write(f"{x},{y},") + 
f.write("\n") + + +def load_contours_list(filename): + contours_list = [] + with open(filename, "r") as f: + for line in f: + points = line.strip().split(",") + # 去处最后一个空字符 + if points[-1] == '': + points = points[:-1] + contour = [] + for i in range(0, len(points), 2): + x, y = float(points[i]), float(points[i+1]) + contour.append(np.array([[x, y]])) + # 去掉长度为0的轮廓 + if (len(contour) > 0): + contours_list.append(np.array(contour)) + print(f"Load {len(contours_list)} contours.") + return contours_list + +def generate_style_image(image_name, dataroot, output_dir): + # plt.imsave("./data/input.jpg", image) + # shutil.copy("../data/input.jpg", "../../QMUPD/examples/input.jpg") + start_time = time.time() + # curr_path = os.getcwd() + #================== settings ================== + # style_root = "../../QMUPD/" + # os.chdir(style_root) + + exp = 'QMUPD_model' + epoch='200' + gpu_id = '-1' + netga = 'resnet_style2_9blocks' + model0_res = 0 + model1_res = 0 + imgsize = 512 + extraflag = ' --netga %s --model0_res %d --model1_res %d' % (netga, model0_res, model1_res) + base_image = os.path.splitext(os.path.basename(image_name))[0] + # 生成风格图像 + # im = draw_tools.generate_style_image(image) + # cv2.imshow('image', image) + # cv2.waitKey(0) + # cv2.destroyAllWindows() + # 临时方案,把图像移动到dataset中 + if not os.path.exists(dataroot): + os.makedirs(dataroot) + else: + # 清空 + files = glob.glob(f'%s*' % dataroot) + for f in files: + os.remove(f) + # copy + shutil.copy(image_name, dataroot) + + # 清空结果 + if not os.path.exists(output_dir): + os.makedirs(output_dir) + else: + # 清空 + files = glob.glob(f'%s*' % output_dir) + for f in files: + os.remove(f) + + #==================== command ================== + vec = [0,1,0] + svec = '%d,%d,%d' % (vec[0],vec[1],vec[2]) + img1 = 'imagesstyle%d-%d-%d'%(vec[0],vec[1],vec[2]) + print('results/%s/test_%s/index%s.html'%(exp,epoch,img1[6:])) + command = 'python3 qmupd_single_image.py --dataroot %s --name %s --model test --output_nc 1 --no_dropout --model_suffix _A %s --num_test 1000 --epoch %s --style_control 1 --imagefolder %s --sinput svec --svec %s --crop_size %d --load_size %d --gpu_ids %s' % (dataroot,exp,extraflag,epoch,img1,svec,imgsize,imgsize,gpu_id) + os.system(command) + return os.path.join(output_dir, f'{base_image}_fake.png') + + +def display_strokes_final(sess, pasting_func, data, init_cursor, image_size, infer_lengths, init_width, + save_base, + cursor_type='next', min_window_size=32, raster_size=128): + """ + :param data: (N_strokes, 9): flag, x0, y0, x1, y1, x2, y2, r0, r2 + :return: + """ + canvas = np.zeros((image_size, image_size), dtype=np.float32) # [0.0-BG, 1.0-stroke] + canvas2_temp = np.zeros((image_size, image_size), dtype=np.float32) # [0.0-BG, 1.0-stroke] + drawn_region = np.zeros_like(canvas) + overlap_region = np.zeros_like(canvas) + canvas_color_with_overlap = np.zeros((image_size, image_size, 3), dtype=np.float32) + canvas_color_wo_overlap = np.zeros((image_size, image_size, 3), dtype=np.float32) + canvas_color_with_moving = np.zeros((image_size, image_size, 3), dtype=np.float32) + + cursor_idx = 0 + + if init_cursor.ndim == 1: + init_cursor = [init_cursor] + + stroke_count = len(data) + color_rgb_set = get_colors(stroke_count) # list of (3,) in [0, 255] + color_idx = 0 + + valid_stroke_count = stroke_count - np.sum(data[:, 0]).astype(np.int32) + len(init_cursor) + valid_color_rgb_set = get_colors(valid_stroke_count) # list of (3,) in [0, 255] + valid_color_idx = -1 + # print('Drawn stroke number', valid_stroke_count) + # print(' flag x1\t\t 
y1\t\t x2\t\t y2\t\t r2\t\t s2') + + # tempimage = np.zeros((image_size, image_size, 3), dtype=np.uint8) + 255 + # color = random.randint(50, 120) + # cv2.imshow('canvas_rgb', tempimage) + contours_list = [] + for round_idx in range(len(infer_lengths)): + contour = [] + round_length = infer_lengths[round_idx] + + cursor_pos = init_cursor[cursor_idx] # (2) + cursor_idx += 1 + prev_width = init_width + prev_scaling = 1.0 + prev_window_size = float(raster_size) # (1) + # cv2.imshow('canvas_rgb', canvas_black) + # 每个笔画 + last_point = None + for round_inner_i in range(round_length): + stroke_idx = np.sum(infer_lengths[:round_idx]).astype(np.int32) + round_inner_i + + curr_window_size_raw = prev_scaling * prev_window_size + curr_window_size_raw = np.maximum(curr_window_size_raw, min_window_size) + curr_window_size_raw = np.minimum(curr_window_size_raw, image_size) + + pen_state = data[stroke_idx, 0] + stroke_params = data[stroke_idx, 1:] # (8) + x1y1, x2y2, width2, scaling2 = stroke_params[0:2], stroke_params[2:4], stroke_params[4], stroke_params[5] + x0y0 = np.zeros_like(x2y2) # (2), [-1.0, 1.0] + x0y0 = np.divide(np.add(x0y0, 1.0), 2.0) # (2), [0.0, 1.0] + x2y2 = np.divide(np.add(x2y2, 1.0), 2.0) # (2), [0.0, 1.0] + widths = np.stack([prev_width, width2], axis=0) # (2) + stroke_params_proc = np.concatenate([x0y0, x1y1, x2y2, widths], axis=-1) # (8) + + next_width = stroke_params[4] + next_scaling = stroke_params[5] + next_window_size = next_scaling * curr_window_size_raw + next_window_size = np.maximum(next_window_size, min_window_size) + next_window_size = np.minimum(next_window_size, image_size) + + prev_width = next_width * curr_window_size_raw / next_window_size + prev_scaling = next_scaling + prev_window_size = curr_window_size_raw + + f = stroke_params_proc.tolist() # (8) + f += [1.0, 1.0] + gt_stroke_img, contour_deatil = draw(f) # (H, W), [0.0-stroke, 1.0-BG] + # print("stroke image", contour) + # contour = cursor_pos * image_size + contour + # cv2.imshow('canvas_stroke', gt_stroke_img) + # print("gt_stroke_img shape:", gt_stroke_img.shape) + # cv2.waitKey(30) + gt_stroke_img_large = image_pasting_v3_testing(1.0 - gt_stroke_img, cursor_pos, + image_size, + curr_window_size_raw, + pasting_func, sess) # [0.0-BG, 1.0-stroke] + # print("gt_stroke_img_large shape:", gt_stroke_img_large.shape) + is_overlap = False + + if pen_state == 0: + canvas += gt_stroke_img_large # [0.0-BG, 1.0-stroke] + # print("canvas shape:", canvas.shape) + # cv2.imshow('canvas_rgb_lager', canvas) + # cv2.waitKey(30) + curr_drawn_stroke_region = np.zeros_like(gt_stroke_img_large) + curr_drawn_stroke_region[gt_stroke_img_large > 0.5] = 1 + intersection = drawn_region * curr_drawn_stroke_region + # regard stroke with >50% overlap area as overlaped stroke + if np.sum(intersection) / np.sum(curr_drawn_stroke_region) > 0.5: + # enlarge the stroke a bit for better visualization + overlap_region[gt_stroke_img_large > 0] += 1 + is_overlap = True + + drawn_region[gt_stroke_img_large > 0.5] = 1 + + color_rgb = color_rgb_set[color_idx] # (3) in [0, 255] + color_idx += 1 + + color_rgb = np.reshape(color_rgb, (1, 1, 3)).astype(np.float32) + color_stroke = np.expand_dims(gt_stroke_img_large, axis=-1) * (1.0 - color_rgb / 255.0) + canvas_color_with_moving = canvas_color_with_moving * np.expand_dims((1.0 - gt_stroke_img_large), + axis=-1) + color_stroke # (H, W, 3) + if pen_state == 0: + valid_color_idx += 1 + + if pen_state == 0: + valid_color_rgb = valid_color_rgb_set[valid_color_idx] # (3) in [0, 255] + # valid_color_idx += 1 + 
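+                # Each drawn stroke (pen_state == 0) takes the next colour from the
+                # valid-colour palette so the drawing order is visible in the saved
+                # output_order_with_overlap.png visualization.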
+ valid_color_rgb = np.reshape(valid_color_rgb, (1, 1, 3)).astype(np.float32) + valid_color_stroke = np.expand_dims(gt_stroke_img_large, axis=-1) * (1.0 - valid_color_rgb / 255.0) + canvas_color_with_overlap = canvas_color_with_overlap * np.expand_dims((1.0 - gt_stroke_img_large), + axis=-1) + valid_color_stroke # (H, W, 3) + if not is_overlap: + canvas_color_wo_overlap = canvas_color_wo_overlap * np.expand_dims((1.0 - gt_stroke_img_large), + axis=-1) + valid_color_stroke # (H, W, 3) + + # update cursor_pos based on hps.cursor_type + new_cursor_offsets = stroke_params[2:4] * (float(curr_window_size_raw) / 2.0) # (1, 6), patch-level + new_cursor_offset_next = new_cursor_offsets + + # important!!! + new_cursor_offset_next = np.concatenate([new_cursor_offset_next[1:2], new_cursor_offset_next[0:1]], axis=-1) + + cursor_pos_large = cursor_pos * float(image_size) + + stroke_position_next = cursor_pos_large + new_cursor_offset_next # (2), large-level + + if cursor_type == 'next': + cursor_pos_large = stroke_position_next # (2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), float(image_size - 1)) # (2), large-level + if (pen_state == 0): + # cursor_pos_fact = int(cursor_pos * float(image_size) + 0.5) + cursor_pos_fact = np.minimum(np.maximum(cursor_pos * float(image_size), 0.0), float(image_size - 1)) + # 假如超出边界 + # cv2.circle(canvas2_temp, (int(cursor_pos_fact[0]), int(cursor_pos_fact[1])), 2, (255, 0, 0), 1) + # cv2.line(canvas2_temp, (int(cursor_pos_fact[0]), int(cursor_pos_fact[1])), (int(cursor_pos_large[0]), int(cursor_pos_large[1])), (255, 0, 0), 1) + # 有起点, 终点, 和轨迹 + if (last_point is not None): + # 如果这一笔的笔画琪点和上一笔的笔画不在同一个位置 + if ((int(cursor_pos_fact[0]) != int(last_point[0]) or int(cursor_pos_fact[1]) != int(last_point[1]))): + # 如果距离比较接近,也算同一个轨迹上面(减少机械臂抬手动作) + if (np.linalg.norm(cursor_pos_fact - last_point) > 2): + # print("add contour and new one") + # 去掉长度为0的轮廓 + if (len(contour) > 0): + contours_list.append(np.array(contour)) + else: + print("==========contour length is 0, in position 1") + # contours_list.append(np.array(contour)) + contour = [] + + for x in contour_deatil: + # x[0] 转为 np.array + x = np.array(x) + point_pos = (x[0] - 128) * curr_window_size_raw / 256 + cursor_pos_fact + point_pos[0] = min(point_pos[0], image_size - 1) + point_pos[1] = min(point_pos[1], image_size - 1) + # 去重 + if (last_point is not None): + if (int(point_pos[0]) != int(last_point[0]) or int(point_pos[1]) != int(last_point[1])): + contour.append(np.array([[point_pos[0], point_pos[1]]])) + last_point = point_pos + cv2.circle(canvas2_temp, (int(point_pos[0]), int(point_pos[1])), 1, (255, 255, 0), 1) + else: + contour.append(np.array([[point_pos[0], point_pos[1]]])) + last_point = point_pos + cv2.circle(canvas2_temp, (int(point_pos[0]), int(point_pos[1])), 1, (255, 255, 0), 1) + + # print(len(contour)) + # cv2.circle(canvas2_temp, (int(point_pos[0]), int(point_pos[1])), 1, (255, 255, 0), 1) + # break + # break + # print("cursor_pos_fact:", contour) + # cv2.imshow('canvas_rgb', canvas2_temp) + # cv2.waitKey(30) + + cursor_pos = cursor_pos_large / float(image_size) + + # print(int(cursor_pos[0] * image_size), int(cursor_pos[1] * image_size)) + # 在对应位置画个点 + # tempimage = cv2.circle(tempimage, (int(cursor_pos[0] * image_size), int(cursor_pos[1] * image_size)), 2, (color, color, color) , 1) + # cv2.imshow('canvas_rgb', tempimage) + # cv2.waitKey(30) + # if (pen_state == 0): + # contour.append([[cursor_pos[0] * image_size, 
cursor_pos[1] * image_size]]) + # 去掉长度为0的轮廓 + if (len(contour) > 0): + contours_list.append(np.array(contour)) + # canvas_rgb = np.stack([np.clip(canvas, 0.0, 1.0) for _ in range(3)], axis=-1) + canvas_color_with_overlap = 255 - np.round(canvas_color_with_overlap * 255.0).astype(np.uint8) + canvas_color_wo_overlap = 255 - np.round(canvas_color_wo_overlap * 255.0).astype(np.uint8) + canvas_color_with_moving = 255 - np.round(canvas_color_with_moving * 255.0).astype(np.uint8) + + canvas_color_png = Image.fromarray(canvas_color_with_overlap, 'RGB') + canvas_color_save_path = os.path.join(save_base, 'output_order_with_overlap.png') + canvas_color_png.save(canvas_color_save_path, 'PNG') + return contours_list + +def drawContours(contours_list, cavas_size): + image = np.zeros(cavas_size, dtype=np.uint8) + 255 + for contour in contours_list: + # color = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255) + color = (0, 0, 0) + for i in range(len(contour)): + point = contour[i] + if i < len(contour) - 1: + # cv2.line(image, tuple(contour[i][0]), tuple(contour[i+1][0]), color, 1) + cv2.circle(image, (int(point[0][0]), int(point[0][1])), 1, color, 1) + return image + + +def getContourList_v2(npz_path): + assert npz_path != '' + + min_window_size = 32 + raster_size = 128 + + split_idx = npz_path.rfind('/') + if split_idx == -1: + file_base = './' + file_name = npz_path[:-4] + else: + file_base = npz_path[:npz_path.rfind('/')] + file_name = npz_path[npz_path.rfind('/') + 1: -4] + + regenerate_base = os.path.join(file_base, file_name) + os.makedirs(regenerate_base, exist_ok=True) + + # differentiable pasting graph + paste_v3_func = DiffPastingV3(raster_size) + + tfconfig = tf.ConfigProto() + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + data = np.load(npz_path, encoding='latin1', allow_pickle=True) + strokes_data = data['strokes_data'] + init_cursors = data['init_cursors'] + image_size = data['image_size'] + round_length = data['round_length'] + init_width = data['init_width'] + if round_length.ndim == 0: + round_lengths = [round_length] + else: + round_lengths = round_length + print('Processing ...') + contours_list = display_strokes_final(sess, paste_v3_func, + strokes_data, init_cursors, image_size, round_lengths, init_width, + regenerate_base, + min_window_size=min_window_size, raster_size=raster_size) + return contours_list +# # mian +# if __name__ == "__main__": +# # 读取图片 +# im = cv2.imread("../data/1_fake.png",cv2.IMREAD_GRAYSCALE) +# # 获取轮廓列表 +# contour_list = getContourList(im, is_show=True) +# # 对轮廓列表进行排序 +# contour_list = sortContoursList(contour_list) +# # 平滑拟合并采样轮廓 +# f_contour_list = sample_and_smooth_contours(im, contour_list, is_show=True) +# # 保存轮廓点到文件中,每个轮廓占一行,x和y坐标用逗号分割,点之间用逗号分割 +# save_contour_points(f_contour_list, "../data/1_fake_data.txt") + + + + +if __name__ == '__main__': + file = "./robot_data/contour_points/image_e1b3f4a3-08f1-4d52-ab40-c5badf38b46e_fake_contour_points.txt" + contours_lists = load_contours_list(file) + contours_lists = sortContoursList(contours_lists) + cv2.imshow("sorted", drawContours(contours_lists, (512, 512,3))) + contours_lists = remove_overlap_and_near_contours(contours_lists, (512, 512), 3, 0.9, 5) + # contours_lists = remove_overlap_and_near_contours(contours_lists, (512, 512), 4, 0.7) + cv2.imshow("remove overlap", drawContours(contours_lists, (512, 512,3))) + # save_contour_points(contours_lists, 
"./image_e1b3f4a3-08f1-4d52-ab40-c5badf38b46e_fake_0_contour_points_sorted.txt") + #contours_lists = sample_and_smooth_contours(contours_lists, 10) + cv2.imshow("sample and smooth", drawContours(contours_lists, (512, 512,3))) + cv2.waitKey(0) \ No newline at end of file diff --git a/robot_painting/qmupd_vs/environment.yaml b/robot_painting/qmupd_vs/environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c962535006e16726b9db9e434d26c8240b49700a --- /dev/null +++ b/robot_painting/qmupd_vs/environment.yaml @@ -0,0 +1,115 @@ +name: vsketch +channels: + - pytorch + - http://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main + - http://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free + - http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge + - http://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ + - http://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/ + - http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/ +dependencies: + - _libgcc_mutex=0.1=conda_forge + - _openmp_mutex=4.5=2_kmp_llvm + - blas=1.0=mkl + - ca-certificates=2024.3.11=h06a4308_0 + - cairo=1.14.8=0 + - certifi=2016.2.28=py36_0 + - cpuonly=2.0=0 + - cudatoolkit=10.0.130=0 + - cycler=0.10.0=py36_0 + - dbus=1.10.20=0 + - dominate=2.4.0=py_0 + - expat=2.1.0=0 + - fftw=3.3.9=h5eee18b_2 + - fontconfig=2.12.1=3 + - freetype=2.5.5=2 + - glib=2.50.2=1 + - gst-plugins-base=1.8.0=0 + - gstreamer=1.8.0=0 + - hdf5=1.10.2=hc401514_3 + - icu=54.1=0 + - jbig=2.1=0 + - jpeg=9b=0 + - ld_impl_linux-64=2.38=h1181459_1 + - libblas=3.9.0=1_h6e990d7_netlib + - libcblas=3.9.0=3_h893e4fe_netlib + - libffi=3.4.4=h6a678d5_0 + - libgcc=7.2.0=h69d50b8_2 + - libgcc-ng=13.2.0=h807b86a_5 + - libgfortran=3.0.0=1 + - libgfortran-ng=7.5.0=ha8ba4b0_17 + - libgfortran4=7.5.0=ha8ba4b0_17 + - libgomp=13.2.0=h807b86a_5 + - libiconv=1.14=0 + - liblapack=3.9.0=3_h893e4fe_netlib + - libopenblas=0.3.18=hf726d26_0 + - libpng=1.6.39=h5eee18b_0 + - libstdcxx-ng=11.2.0=h1234567_1 + - libtiff=4.0.6=3 + - libwebp-base=1.3.2=h5eee18b_0 + - libxcb=1.12=1 + - libxml2=2.9.4=0 + - llvm-openmp=14.0.6=h9e868ea_0 + - lz4-c=1.9.4=h6a678d5_0 + - matplotlib=2.0.2=np113py36_0 + - mkl=2017.0.3=0 + - ncurses=6.4=h6a678d5_0 + - olefile=0.46=pyhd3eb1b0_0 + - opencv=3.4.1=py36h6fd60c2_1 + - openssl=1.0.2l=0 + - pip=21.3.1 + - pcre=8.39=1 + - pillow=4.2.1=py36_0 + - pixman=0.34.0=0 + - pyparsing=2.2.0=py36_0 + - pyqt=5.6.0=py36_2 + - python=3.6.2=0 + - python-dateutil=2.6.1=py36_0 + - python_abi=3.6=2_cp36m + - pytorch-mutex=1.0=cpu + - pytz=2017.2=py36_0 + - qt=5.6.2=5 + - readline=6.2=2 + - scipy=0.19.1=np113py36_0 + - setuptools=36.4.0=py36_1 + - sip=4.18=py36_0 + - sqlite=3.13.0=0 + - tk=8.5.18=0 + - wheel=0.29.0=py36_0 + - xz=5.2.3=0 + - zlib=1.2.13=h5eee18b_0 + - zstd=1.3.3=h84994c4_0 + - pip: + - absl-py==1.4.0 + - astor==0.8.1 + - cached-property==1.5.2 + - cairocffi==1.0.0 + - cffi==1.15.1 + - dataclasses==0.8 + - gast==0.5.4 + - gizeh==0.1.11 + - grpcio==1.48.2 + - h5py==3.1.0 + - importlib-metadata==4.8.3 + - importlib-resources==5.4.0 + - keras-applications==1.0.8 + - keras-preprocessing==1.1.2 + - markdown==3.3.7 + - munch==4.0.0 + - numpy==1.17.0 + - opencv-python==3.4.2.16 + - pip==21.3.1 + - pretrainedmodels==0.7.4 + - protobuf==3.19.6 + - pycparser==2.21 + - six==1.16.0 + - tensorboard==1.12.2 + - tensorflow==1.12.0 + - termcolor==1.1.0 + - torch==1.2.0+cpu + - torchvision==0.4.0+cpu + - tqdm==4.64.1 + - typing-extensions==4.1.1 + - werkzeug==2.0.3 + - zipp==3.6.0 +prefix: /home/qian/anaconda3/envs/vsketch diff --git 
a/robot_painting/qmupd_vs/examples/celebahq-11103.jpg b/robot_painting/qmupd_vs/examples/celebahq-11103.jpg new file mode 100644 index 0000000000000000000000000000000000000000..02c594956835579c76f00cf41dca9d803d56f4d4 Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-11103.jpg differ diff --git a/robot_painting/qmupd_vs/examples/celebahq-11918.jpg b/robot_painting/qmupd_vs/examples/celebahq-11918.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21852a3f266515c979a2b74b3c52b17dbe341940 Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-11918.jpg differ diff --git a/robot_painting/qmupd_vs/examples/celebahq-15556.jpg b/robot_painting/qmupd_vs/examples/celebahq-15556.jpg new file mode 100644 index 0000000000000000000000000000000000000000..12503163767fefec021d37e593009992ad87799c Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-15556.jpg differ diff --git a/robot_painting/qmupd_vs/examples/celebahq-25033.jpg b/robot_painting/qmupd_vs/examples/celebahq-25033.jpg new file mode 100644 index 0000000000000000000000000000000000000000..536e7fc045052f7a51e908b21d582485d8eda519 Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-25033.jpg differ diff --git a/robot_painting/qmupd_vs/examples/celebahq-2524.jpg b/robot_painting/qmupd_vs/examples/celebahq-2524.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b145b96861f531eb6003e198445742694295ac73 Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-2524.jpg differ diff --git a/robot_painting/qmupd_vs/examples/celebahq-26036.jpg b/robot_painting/qmupd_vs/examples/celebahq-26036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1053b05098a6a66216ce14daedea0eee114737ae Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-26036.jpg differ diff --git a/robot_painting/qmupd_vs/examples/celebahq-27799.jpg b/robot_painting/qmupd_vs/examples/celebahq-27799.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfc7c6467d9e13b7fbd69237326c150a2554323d Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-27799.jpg differ diff --git a/robot_painting/qmupd_vs/examples/celebahq-4797.jpg b/robot_painting/qmupd_vs/examples/celebahq-4797.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf18592f8eb43ba7f80232291b4d3da51bda62b4 Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-4797.jpg differ diff --git a/robot_painting/qmupd_vs/examples/celebahq-7235.jpg b/robot_painting/qmupd_vs/examples/celebahq-7235.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f58117681d314434e9a0480afff9ab0a21e2839 Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-7235.jpg differ diff --git a/robot_painting/qmupd_vs/examples/celebahq-896.jpg b/robot_painting/qmupd_vs/examples/celebahq-896.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfa58028e8fca031925d8178a2751906b3d79fa2 Binary files /dev/null and b/robot_painting/qmupd_vs/examples/celebahq-896.jpg differ diff --git a/robot_painting/qmupd_vs/hyper_parameters.py b/robot_painting/qmupd_vs/hyper_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..66a3fa9f938d0b35f09a84811cb0058cda94e6a6 --- /dev/null +++ b/robot_painting/qmupd_vs/hyper_parameters.py @@ -0,0 +1,341 @@ +import tensorflow as tf + + +############################################# +# Common parameters 
+############################################# + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string( + 'dataset_dir', + 'datasets', + 'The directory of sketch data of the dataset.') +tf.app.flags.DEFINE_string( + 'log_root', + 'outputs/log', + 'Directory to store tensorboard.') +tf.app.flags.DEFINE_string( + 'log_img_root', + 'outputs/log_img', + 'Directory to store intermediate output images.') +tf.app.flags.DEFINE_string( + 'snapshot_root', + 'outputs/snapshot', + 'Directory to store model checkpoints.') +tf.app.flags.DEFINE_string( + 'neural_renderer_path', + 'outputs/snapshot/pretrain_neural_renderer/renderer_300000.tfmodel', + 'Path to the neural renderer model.') +tf.app.flags.DEFINE_string( + 'perceptual_model_root', + 'outputs/snapshot/pretrain_perceptual_model', + 'Directory to store perceptual model.') +tf.app.flags.DEFINE_string( + 'data', + '', + 'The dataset type.') + + +def get_default_hparams_clean(): + """Return default HParams for sketch-rnn.""" + hparams = tf.contrib.training.HParams( + program_name='new_train_clean_line_drawings', + data_set='clean_line_drawings', # Our dataset. + + input_channel=1, + + num_steps=75040, # Total number of steps of training. + save_every=75000, + eval_every=5000, + + max_seq_len=48, + batch_size=20, + gpus=[0, 1], + loop_per_gpu=1, + + sn_loss_type='increasing', # ['decreasing', 'fixed', 'increasing'] + stroke_num_loss_weight=0.02, + stroke_num_loss_weight_end=0.0, + increase_start_steps=25000, + decrease_stop_steps=40000, + + perc_loss_layers=['ReLU1_2', 'ReLU2_2', 'ReLU3_3', 'ReLU5_1'], + perc_loss_fuse_type='add', # ['max', 'add', 'raw_add', 'weighted_sum'] + + init_cursor_on_undrawn_pixel=False, + + early_pen_loss_type='move', # ['head', 'tail', 'move'] + early_pen_loss_weight=0.1, + early_pen_length=7, + + min_width=0.01, + min_window_size=32, + max_scaling=2.0, + + encode_cursor_type='value', + + image_size_small=128, + image_size_large=278, + + cropping_type='v3', # ['v2', 'v3'] + pasting_type='v3', # ['v2', 'v3'] + pasting_diff=True, + + concat_win_size=True, + + encoder_type='conv13_c3', + # ['conv10', 'conv10_deep', 'conv13', 'conv10_c3', 'conv10_deep_c3', 'conv13_c3'] + # ['conv13_c3_attn'] + # ['combine33', 'combine43', 'combine53', 'combineFC'] + vary_thickness=False, + + outside_loss_weight=10.0, + win_size_outside_loss_weight=10.0, + + resize_method='AREA', # ['BILINEAR', 'NEAREST_NEIGHBOR', 'BICUBIC', 'AREA'] + + concat_cursor=True, + + use_softargmax=True, + soft_beta=10, # value for the soft argmax + + raster_loss_weight=1.0, + + dec_rnn_size=256, # Size of decoder. + dec_model='hyper', # Decoder: lstm, layer_norm or hyper. + # z_size=128, # Size of latent vector z. Recommend 32, 64 or 128. + bin_gt=True, + + stop_accu_grad=True, + + random_cursor=True, + cursor_type='next', + + raster_size=128, + + pix_drop_kp=1.0, # Dropout keep rate + add_coordconv=True, + position_format='abs', + raster_loss_base_type='perceptual', # [l1, mse, perceptual] + + grad_clip=1.0, # Gradient clipping. Recommend leaving at 1.0. + + learning_rate=0.0001, # Learning rate. + decay_rate=0.9999, # Learning rate decay per minibatch. + decay_power=0.9, + min_learning_rate=0.000001, # Minimum learning rate. + + use_recurrent_dropout=True, # Dropout with memory loss. Recommended + recurrent_dropout_prob=0.90, # Probability of recurrent dropout keep. + use_input_dropout=False, # Input dropout. Recommend leaving False. + input_dropout_prob=0.90, # Probability of input dropout keep. + use_output_dropout=False, # Output dropout. 
Recommend leaving False. + output_dropout_prob=0.90, # Probability of output dropout keep. + + model_mode='train' # ['train', 'eval', 'sample'] + ) + return hparams + + +def get_default_hparams_rough(): + """Return default HParams for sketch-rnn.""" + hparams = tf.contrib.training.HParams( + program_name='new_train_rough_sketches', + data_set='rough_sketches', # ['rough_sketches', 'faces'] + + input_channel=3, + + num_steps=90040, # Total number of steps of training. + save_every=90000, + eval_every=5000, + + max_seq_len=48, + batch_size=20, + gpus=[0, 1], + loop_per_gpu=1, + + sn_loss_type='increasing', # ['decreasing', 'fixed', 'increasing'] + stroke_num_loss_weight=0.1, + stroke_num_loss_weight_end=0.0, + increase_start_steps=25000, + decrease_stop_steps=40000, + + photo_prob_type='one', # ['increasing', 'zero', 'one'] + photo_prob_start_step=35000, + + perc_loss_layers=['ReLU2_2', 'ReLU3_3', 'ReLU5_1'], + perc_loss_fuse_type='add', # ['max', 'add', 'raw_add', 'weighted_sum'] + + early_pen_loss_type='move', # ['head', 'tail', 'move'] + early_pen_loss_weight=0.2, + early_pen_length=7, + + min_width=0.01, + min_window_size=32, + max_scaling=2.0, + + encode_cursor_type='value', + + image_size_small=128, + image_size_large=278, + + cropping_type='v3', # ['v2', 'v3'] + pasting_type='v3', # ['v2', 'v3'] + pasting_diff=True, + + concat_win_size=True, + + encoder_type='conv13_c3', + # ['conv10', 'conv10_deep', 'conv13', 'conv10_c3', 'conv10_deep_c3', 'conv13_c3'] + # ['conv13_c3_attn'] + # ['combine33', 'combine43', 'combine53', 'combineFC'] + + outside_loss_weight=10.0, + win_size_outside_loss_weight=10.0, + + resize_method='AREA', # ['BILINEAR', 'NEAREST_NEIGHBOR', 'BICUBIC', 'AREA'] + + concat_cursor=True, + + use_softargmax=True, + soft_beta=10, # value for the soft argmax + + raster_loss_weight=1.0, + + dec_rnn_size=256, # Size of decoder. + dec_model='hyper', # Decoder: lstm, layer_norm or hyper. + # z_size=128, # Size of latent vector z. Recommend 32, 64 or 128. + bin_gt=True, + + stop_accu_grad=True, + + random_cursor=True, + cursor_type='next', + + raster_size=128, + + pix_drop_kp=1.0, # Dropout keep rate + add_coordconv=True, + position_format='abs', + raster_loss_base_type='perceptual', # [l1, mse, perceptual] + + grad_clip=1.0, # Gradient clipping. Recommend leaving at 1.0. + + learning_rate=0.0001, # Learning rate. + decay_rate=0.9999, # Learning rate decay per minibatch. + decay_power=0.9, + min_learning_rate=0.000001, # Minimum learning rate. + + use_recurrent_dropout=True, # Dropout with memory loss. Recommended + recurrent_dropout_prob=0.90, # Probability of recurrent dropout keep. + use_input_dropout=False, # Input dropout. Recommend leaving False. + input_dropout_prob=0.90, # Probability of input dropout keep. + use_output_dropout=False, # Output dropout. Recommend leaving False. + output_dropout_prob=0.90, # Probability of output dropout keep. + + model_mode='train' # ['train', 'eval', 'sample'] + ) + return hparams + + +def get_default_hparams_normal(): + """Return default HParams for sketch-rnn.""" + hparams = tf.contrib.training.HParams( + program_name='new_train_faces', + data_set='faces', # ['rough_sketches', 'faces'] + + input_channel=3, + + num_steps=90040, # Total number of steps of training. 
+ save_every=90000, + eval_every=5000, + + max_seq_len=48, + batch_size=20, + gpus=[0, 1], + loop_per_gpu=1, + + sn_loss_type='fixed', # ['decreasing', 'fixed', 'increasing'] + stroke_num_loss_weight=0.0, + stroke_num_loss_weight_end=0.0, + increase_start_steps=0, + decrease_stop_steps=40000, + + photo_prob_type='interpolate', # ['increasing', 'zero', 'one', 'interpolate'] + photo_prob_start_step=30000, + photo_prob_end_step=60000, + + perc_loss_layers=['ReLU2_2', 'ReLU3_3', 'ReLU4_2', 'ReLU5_1'], + perc_loss_fuse_type='add', # ['max', 'add', 'raw_add', 'weighted_sum'] + + early_pen_loss_type='move', # ['head', 'tail', 'move'] + early_pen_loss_weight=0.2, + early_pen_length=7, + + min_width=0.01, + min_window_size=32, + max_scaling=2.0, + + encode_cursor_type='value', + + image_size_small=128, + image_size_large=256, + + cropping_type='v3', # ['v2', 'v3'] + pasting_type='v3', # ['v2', 'v3'] + pasting_diff=True, + + concat_win_size=True, + + encoder_type='conv13_c3', + # ['conv10', 'conv10_deep', 'conv13', 'conv10_c3', 'conv10_deep_c3', 'conv13_c3'] + # ['conv13_c3_attn'] + # ['combine33', 'combine43', 'combine53', 'combineFC'] + + outside_loss_weight=10.0, + win_size_outside_loss_weight=10.0, + + resize_method='AREA', # ['BILINEAR', 'NEAREST_NEIGHBOR', 'BICUBIC', 'AREA'] + + concat_cursor=True, + + use_softargmax=True, + soft_beta=10, # value for the soft argmax + + raster_loss_weight=1.0, + + dec_rnn_size=256, # Size of decoder. + dec_model='hyper', # Decoder: lstm, layer_norm or hyper. + # z_size=128, # Size of latent vector z. Recommend 32, 64 or 128. + bin_gt=True, + + stop_accu_grad=True, + + random_cursor=True, + cursor_type='next', + + raster_size=128, + + pix_drop_kp=1.0, # Dropout keep rate + add_coordconv=True, + position_format='abs', + raster_loss_base_type='perceptual', # [l1, mse, perceptual] + + grad_clip=1.0, # Gradient clipping. Recommend leaving at 1.0. + + learning_rate=0.0001, # Learning rate. + decay_rate=0.9999, # Learning rate decay per minibatch. + decay_power=0.9, + min_learning_rate=0.000001, # Minimum learning rate. + + use_recurrent_dropout=True, # Dropout with memory loss. Recommended + recurrent_dropout_prob=0.90, # Probability of recurrent dropout keep. + use_input_dropout=False, # Input dropout. Recommend leaving False. + input_dropout_prob=0.90, # Probability of input dropout keep. + use_output_dropout=False, # Output dropout. Recommend leaving False. + output_dropout_prob=0.90, # Probability of output dropout keep. 
+ + model_mode='train' # ['train', 'eval', 'sample'] + ) + return hparams diff --git a/robot_painting/qmupd_vs/main.py b/robot_painting/qmupd_vs/main.py new file mode 100644 index 0000000000000000000000000000000000000000..aaa3f12c95d784b4d7c31fe6cfa46de1c55ca3f4 --- /dev/null +++ b/robot_painting/qmupd_vs/main.py @@ -0,0 +1,574 @@ + +from camera_tools import CameraApp +import draw_tools +import cv2 +import os +from options.test_options import TestOptions +from data import create_dataset +from models import create_model +from util.visualizer import save_images +import shutil +import os, glob +import warnings +import util +import paramiko + +#================== settings ================== +exp = 'QMUPD_model' +epoch='200' +dataroot = 'robot_data/dataset/' +gpu_id = '-1' +netga = 'resnet_style2_9blocks' +model0_res = 0 +model1_res = 0 +imgsize = 512 +extraflag = ' --netga %s --model0_res %d --model1_res %d' % (netga, model0_res, model1_res) +output_dir = 'robot_data/output/' + +import numpy as np +import os +import tensorflow as tf +from six.moves import range +from PIL import Image +import argparse + +import hyper_parameters as hparams +from model_common_test import DiffPastingV3, VirtualSketchingModel +from utils import reset_graph, load_checkpoint, update_hyperparams, draw, \ + save_seq_data, image_pasting_v3_testing, draw_strokes +from dataset_utils import load_dataset_testing + +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + + +def move_cursor_to_undrawn(current_pos_list, input_image_, patch_size, + move_min_dist, move_max_dist, trial_times=20): + """ + :param current_pos_list: (select_times, 1, 2), [0.0, 1.0) + :param input_image_: (1, image_size, image_size, 3), [0-stroke, 1-BG] + :return: new_cursor_pos: (select_times, 1, 2), [0.0, 1.0) + """ + + def crop_patch(image, center, image_size, crop_size): + x0 = center[0] - crop_size // 2 + x1 = x0 + crop_size + y0 = center[1] - crop_size // 2 + y1 = y0 + crop_size + x0 = max(0, min(x0, image_size)) + y0 = max(0, min(y0, image_size)) + x1 = max(0, min(x1, image_size)) + y1 = max(0, min(y1, image_size)) + patch = image[y0:y1, x0:x1] + return patch + + def isvalid_cursor(input_img, cursor, raster_size, image_size): + # input_img: (image_size, image_size, 3), [0.0-BG, 1.0-stroke] + cursor_large = cursor * float(image_size) + cursor_large = np.round(cursor_large).astype(np.int32) + input_crop_patch = crop_patch(input_img, cursor_large, image_size, raster_size) + if np.sum(input_crop_patch) > 0.0: + return True + else: + return False + + def randomly_move_cursor(cursor_position, img_size, min_dist_p, max_dist_p): + # cursor_position: (2), [0.0, 1.0) + cursor_pos_large = cursor_position * img_size + min_dist = int(min_dist_p / 2.0 * img_size) + max_dist = int(max_dist_p / 2.0 * img_size) + rand_cursor_offset = np.random.randint(min_dist, max_dist, size=cursor_pos_large.shape) + rand_cursor_offset_sign = np.random.randint(0, 1 + 1, size=cursor_pos_large.shape) + rand_cursor_offset_sign[rand_cursor_offset_sign == 0] = -1 + rand_cursor_offset = rand_cursor_offset * rand_cursor_offset_sign + + new_cursor_pos_large = cursor_pos_large + rand_cursor_offset + new_cursor_pos_large = np.minimum(np.maximum(new_cursor_pos_large, 0), img_size - 1) # (2), large-level + new_cursor_pos = new_cursor_pos_large.astype(np.float32) / float(img_size) + return new_cursor_pos + + input_image = 1.0 - input_image_[0] # (image_size, image_size, 3), [0-BG, 1-stroke] + img_size = input_image.shape[0] + + new_cursor_pos = [] + for cursor_i in range(current_pos_list.shape[0]): + 
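+        # Try up to trial_times random jumps for this cursor and keep the first
+        # one whose surrounding patch of the input image still contains stroke
+        # (ink) pixels; the last trial is accepted unconditionally so the loop
+        # always terminates.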
curr_cursor = current_pos_list[cursor_i][0] + + for trial_i in range(trial_times): + new_cursor = randomly_move_cursor(curr_cursor, img_size, move_min_dist, move_max_dist) # (2), [0.0, 1.0) + + if isvalid_cursor(input_image, new_cursor, patch_size, img_size) or trial_i == trial_times - 1: + new_cursor_pos.append(new_cursor) + break + + assert len(new_cursor_pos) == current_pos_list.shape[0] + new_cursor_pos = np.expand_dims(np.stack(new_cursor_pos, axis=0), axis=1) # (select_times, 1, 2), [0.0, 1.0) + return new_cursor_pos + + +def sample(sess, model, input_photos, init_cursor, image_size, init_len, seq_lens, + state_dependent, pasting_func, round_stop_state_num, + min_dist_p, max_dist_p): + """Samples a sequence from a pre-trained model.""" + select_times = 1 + curr_canvas = np.zeros(dtype=np.float32, + shape=(select_times, image_size, image_size)) # [0.0-BG, 1.0-stroke] + + initial_state = sess.run(model.initial_state) + + params_list = [[] for _ in range(select_times)] + state_raw_list = [[] for _ in range(select_times)] + state_soft_list = [[] for _ in range(select_times)] + window_size_list = [[] for _ in range(select_times)] + + round_cursor_list = [] + round_length_real_list = [] + + input_photos_tiles = np.tile(input_photos, (select_times, 1, 1, 1)) + + for cursor_i, seq_len in enumerate(seq_lens): + if cursor_i == 0: + cursor_pos = np.squeeze(init_cursor, axis=0) # (select_times, 1, 2) + else: + cursor_pos = move_cursor_to_undrawn(cursor_pos, input_photos, model.hps.raster_size, + min_dist_p, max_dist_p) # (select_times, 1, 2) + round_cursor_list.append(cursor_pos) + + prev_state = initial_state + prev_width = np.stack([model.hps.min_width for _ in range(select_times)], axis=0) + prev_scaling = np.ones((select_times), dtype=np.float32) # (N) + prev_window_size = np.ones((select_times), dtype=np.float32) * model.hps.raster_size # (N) + + continuous_one_state_num = 0 + + for i in range(seq_len): + if not state_dependent and i % init_len == 0: + prev_state = initial_state + + curr_window_size = prev_scaling * prev_window_size # (N) + curr_window_size = np.maximum(curr_window_size, model.hps.min_window_size) + curr_window_size = np.minimum(curr_window_size, image_size) + + feed = { + model.initial_state: prev_state, + model.input_photo: input_photos_tiles, + model.curr_canvas_hard: curr_canvas.copy(), + model.cursor_position: cursor_pos, + model.image_size: image_size, + model.init_width: prev_width, + model.init_scaling: prev_scaling, + model.init_window_size: prev_window_size, + } + + o_other_params_list, o_pen_list, o_pred_params_list, next_state_list = \ + sess.run([model.other_params, model.pen_ras, model.pred_params, model.final_state], feed_dict=feed) + # o_other_params: (N, 6), o_pen: (N, 2), pred_params: (N, 1, 7), next_state: (N, 1024) + # o_other_params: [tanh*2, sigmoid*2, tanh*2, sigmoid*2] + + idx_eos_list = np.argmax(o_pen_list, axis=1) # (N) + + output_i = 0 + idx_eos = idx_eos_list[output_i] + + eos = [0, 0] + eos[idx_eos] = 1 + + other_params = o_other_params_list[output_i].tolist() # (6) + params_list[output_i].append([eos[1]] + other_params) + state_raw_list[output_i].append(o_pen_list[output_i][1]) + state_soft_list[output_i].append(o_pred_params_list[output_i, 0, 0]) + window_size_list[output_i].append(curr_window_size[output_i]) + + # draw the stroke and add to the canvas + x1y1, x2y2, width2 = o_other_params_list[output_i, 0:2], o_other_params_list[output_i, 2:4], \ + o_other_params_list[output_i, 4] + x0y0 = np.zeros_like(x2y2) # (2), [-1.0, 1.0] + x0y0 = 
np.divide(np.add(x0y0, 1.0), 2.0) # (2), [0.0, 1.0] + x2y2 = np.divide(np.add(x2y2, 1.0), 2.0) # (2), [0.0, 1.0] + widths = np.stack([prev_width[output_i], width2], axis=0) # (2) + o_other_params_proc = np.concatenate([x0y0, x1y1, x2y2, widths], axis=-1).tolist() # (8) + + if idx_eos == 0: + f = o_other_params_proc + [1.0, 1.0] + pred_stroke_img, _ = draw(f) # (raster_size, raster_size), [0.0-stroke, 1.0-BG] + pred_stroke_img_large = image_pasting_v3_testing(1.0 - pred_stroke_img, + cursor_pos[output_i, 0], + image_size, + curr_window_size[output_i], + pasting_func, sess) # [0.0-BG, 1.0-stroke] + curr_canvas[output_i] += pred_stroke_img_large # [0.0-BG, 1.0-stroke] + + continuous_one_state_num = 0 + else: + continuous_one_state_num += 1 + + curr_canvas = np.clip(curr_canvas, 0.0, 1.0) + + next_width = o_other_params_list[:, 4] # (N) + next_scaling = o_other_params_list[:, 5] + next_window_size = next_scaling * curr_window_size # (N) + next_window_size = np.maximum(next_window_size, model.hps.min_window_size) + next_window_size = np.minimum(next_window_size, image_size) + + prev_state = next_state_list + prev_width = next_width * curr_window_size / next_window_size # (N,) + prev_scaling = next_scaling # (N) + prev_window_size = curr_window_size + + # update cursor_pos based on hps.cursor_type + new_cursor_offsets = o_other_params_list[:, 2:4] * ( + np.expand_dims(curr_window_size, axis=-1) / 2.0) # (N, 2), patch-level + new_cursor_offset_next = new_cursor_offsets + + # important!!! + new_cursor_offset_next = np.concatenate([new_cursor_offset_next[:, 1:2], new_cursor_offset_next[:, 0:1]], + axis=-1) + + cursor_pos_large = cursor_pos * float(image_size) + stroke_position_next = cursor_pos_large[:, 0, :] + new_cursor_offset_next # (N, 2), large-level + + if model.hps.cursor_type == 'next': + cursor_pos_large = stroke_position_next # (N, 2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), + float(image_size - 1)) # (N, 2), large-level + cursor_pos_large = np.expand_dims(cursor_pos_large, axis=1) # (N, 1, 2) + cursor_pos = cursor_pos_large / float(image_size) + + if continuous_one_state_num >= round_stop_state_num or i == seq_len - 1: + round_length_real_list.append(i + 1) + break + + return params_list, state_raw_list, state_soft_list, curr_canvas, window_size_list, \ + round_cursor_list, round_length_real_list + + +def main_testing(test_image_base_dir, test_dataset, test_image_name, + sampling_base_dir, model_base_dir, model_name, + sampling_num, + min_dist_p, max_dist_p, + longer_infer_lens, round_stop_state_num, + draw_seq=False, draw_order=False, + state_dependent=True): + model_params_default = hparams.get_default_hparams_rough() + model_params = update_hyperparams(model_params_default, model_base_dir, model_name, infer_dataset=test_dataset) + + [test_set, eval_hps_model, sample_hps_model] = \ + load_dataset_testing(test_image_base_dir, test_dataset, test_image_name, model_params) + + test_image_raw_name = test_image_name[:test_image_name.find('.')] + model_dir = os.path.join(model_base_dir, model_name) + + reset_graph() + sampling_model = VirtualSketchingModel(sample_hps_model) + + # differentiable pasting graph + paste_v3_func = DiffPastingV3(sample_hps_model.raster_size) + + tfconfig = tf.ConfigProto() + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + # loads the weights from checkpoint into our model + snapshot_step 
= load_checkpoint(sess, model_dir, gen_model_pretrain=True)
+ print('snapshot_step', snapshot_step)
+ sampling_dir = os.path.join(sampling_base_dir, test_dataset + '__' + model_name)
+ os.makedirs(sampling_dir, exist_ok=True)
+
+ for sampling_i in range(sampling_num):
+ input_photos, init_cursors, test_image_size = test_set.get_test_image()
+ # input_photos: (1, image_size, image_size, 3), [0-stroke, 1-BG]
+ # init_cursors: (N, 1, 2), in size [0.0, 1.0)
+
+ print()
+ print(test_image_name, ', image_size:', test_image_size, ', sampling_i:', sampling_i)
+ print('Processing ...')
+
+ if init_cursors.ndim == 3:
+ init_cursors = np.expand_dims(init_cursors, axis=0)
+
+ input_photos = input_photos[0:1, :, :, :]
+
+ ori_img = (input_photos.copy()[0] * 255.0).astype(np.uint8)
+ ori_img_png = Image.fromarray(ori_img, 'RGB')
+ ori_img_png.save(os.path.join(sampling_dir, test_image_raw_name + '_input.png'), 'PNG')
+
+ # decoding for sampling
+ strokes_raw_out_list, states_raw_out_list, states_soft_out_list, pred_imgs_out, \
+ window_size_out_list, round_new_cursors, round_new_lengths = sample(
+ sess, sampling_model, input_photos, init_cursors, test_image_size,
+ eval_hps_model.max_seq_len, longer_infer_lens, state_dependent, paste_v3_func,
+ round_stop_state_num, min_dist_p, max_dist_p)
+ # pred_imgs_out: (N, H, W), [0.0-BG, 1.0-stroke]
+
+ print('## round_lengths:', len(round_new_lengths), ':', round_new_lengths)
+
+ output_i = 0
+ strokes_raw_out = np.stack(strokes_raw_out_list[output_i], axis=0)
+ states_raw_out = states_raw_out_list[output_i]
+ states_soft_out = states_soft_out_list[output_i]
+ window_size_out = window_size_out_list[output_i]
+
+ multi_cursors = [init_cursors[0, output_i, 0]]
+ for c_i in range(len(round_new_cursors)):
+ best_cursor = round_new_cursors[c_i][output_i, 0] # (2)
+ multi_cursors.append(best_cursor)
+ assert len(multi_cursors) == len(round_new_lengths)
+
+ print('strokes_raw_out', strokes_raw_out.shape)
+
+ clean_states_soft_out = np.array(states_soft_out) # (N)
+
+ flag_list = strokes_raw_out[:, 0].astype(np.int32) # (N)
+ drawing_len = len(flag_list) - np.sum(flag_list)
+ assert drawing_len >= 0
+
+ # print(' flag raw\t soft\t x1\t\t y1\t\t x2\t\t y2\t\t r2\t\t s2')
+ for i in range(strokes_raw_out.shape[0]):
+ flag, x1, y1, x2, y2, r2, s2 = strokes_raw_out[i]
+ win_size = window_size_out[i]
+ out_format = '#%d: %d | %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f'
+ out_values = (i, flag, states_raw_out[i], clean_states_soft_out[i], x1, y1, x2, y2, r2, s2)
+ out_log = out_format % out_values
+ # print(out_log)
+
+ print('Saving results ...')
+ # Save the stroke sequence data and render the predicted strokes
+ print("================", sampling_dir, test_image_raw_name + '_' + str(sampling_i))
+ save_seq_data(sampling_dir, test_image_raw_name + '_' + str(sampling_i),
+ strokes_raw_out, multi_cursors,
+ test_image_size, round_new_lengths, eval_hps_model.min_width)
+
+ draw_strokes(strokes_raw_out, sampling_dir, test_image_raw_name + '_' + str(sampling_i) + '_pred.png',
+ ori_img, test_image_size,
+ multi_cursors, round_new_lengths, eval_hps_model.min_width, eval_hps_model.cursor_type,
+ sample_hps_model.raster_size, sample_hps_model.min_window_size,
+ sess,
+ pasting_func=paste_v3_func,
+ save_seq=draw_seq, draw_order=draw_order)
+
+
+def generate_simple_order_line(model_name, test_image_name, sampling_num):
+ test_dataset = 'rough_sketches'
+ # test_image_base_dir = 'sample_inputs'
+ # test_image_base_dir = 'results/QMUPD_model/test_200/imagesstyle0-0-1'
+ test_image_base_dir = './'
+ sampling_base_dir = 
'robot_data/sampling'
+ model_base_dir = 'outputs/snapshot'
+
+ state_dependent = False
+ longer_infer_lens = [128 for _ in range(10)]
+ round_stop_state_num = 12
+ min_dist_p = 0.3
+ max_dist_p = 0.9
+
+ draw_seq = False
+ draw_color_order = True
+
+ # set numpy output to something sensible
+ np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)
+
+ #main_testing(test_image_base_dir, test_dataset, test_image_name,
+ # sampling_base_dir, model_base_dir, model_name, sampling_num,
+ # min_dist_p=min_dist_p, max_dist_p=max_dist_p,
+ # draw_seq=draw_seq, draw_order=draw_color_order,
+ # state_dependent=state_dependent, longer_infer_lens=longer_infer_lens,
+ # round_stop_state_num=round_stop_state_num)
+ main_testing(output_dir, test_dataset, test_image_name,
+ sampling_base_dir, model_base_dir, model_name, sampling_num,
+ min_dist_p=min_dist_p, max_dist_p=max_dist_p,
+ draw_seq=draw_seq, draw_order=draw_color_order,
+ state_dependent=state_dependent, longer_infer_lens=longer_infer_lens,
+ round_stop_state_num=round_stop_state_num)
+
+
+def decode_npz_file(npz_file):
+ data = np.load(npz_file, encoding='latin1', allow_pickle=True)
+ strokes_data = data['strokes_data']
+ init_cursors = data['init_cursors']
+ image_size = data['image_size']
+ round_length = data['round_length']
+ init_width = data['init_width']
+ return strokes_data, init_cursors, image_size, round_length, init_width
+
+def scp_transfer(host, port, username, password, local_path, remote_path):
+ # Create an SSH client object
+ ssh = paramiko.SSHClient()
+
+ # Allow connecting to hosts that are not listed in known_hosts
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+ # Connect to the SSH server
+ ssh.connect(host, port, username, password)
+
+ # Open an SFTP session on top of the SSH client
+ sftp = ssh.open_sftp()
+
+ # Upload the file with SFTP put
+ sftp.put(local_path, remote_path)
+
+ # Close the SFTP session and the SSH connection
+ sftp.close()
+ ssh.close()
+
+from flask import Flask, request, send_from_directory
+
+app = Flask(__name__)
+
+@app.route('/upload', methods=['GET', 'POST'])
+def create_upload_file():
+ if request.method == 'POST':
+ print("XXXXXXXXXXXXXXXXXXXXX")
+ file = request.files['file']
+ filename = file.filename
+ file.save(os.path.join(dataroot, filename))
+ return "OK"
+import time
+
+
+def transparence2white(img):
+ sp=img.shape # image shape: (rows, cols, channels)
+ width=sp[0] # note: sp[0] is actually the number of rows (image height)
+ height=sp[1] # note: sp[1] is actually the number of columns (image width)
+ for yh in range(height):
+ for xw in range(width):
+ color_d=img[xw,yh] # visit every pixel and read its 4-channel color
+ if(color_d.size != 4): # images with only three channels (no alpha) pass through unchanged
+ continue
+ if(color_d[3] ==0): # the last channel is alpha; 0 means the pixel is fully transparent
+ img[xw,yh]=[255,255,255,255] # paint the transparent pixel white and make it opaque
+ return img
+
+@app.route('/sketch', methods=['POST'])
+def sketch():
+ # Save the file under the dataroot folder
+ # file = request.files['file']
+ # filename = file.filename
+ # print("XXXXXXXXXXXXXXXXXXXX")
+ # print(filename)
+ image_path = request.form.get("image_path")
+ print("image_path:", image_path)
+ matting_root = "/home/qian/projects/robot_sketch_draw/image-matting"
+ filename = image_path.split('/')[-1]
+ print("Copying the file into the input folder")
+ src_path = os.path.join(matting_root + image_path)
+ print("src_path:", src_path)
+ filepath = os.path.join(dataroot,"../input", filename)
+ shutil.copyfile(src_path, filepath)
+
+ png_image = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)
+ filepath=filepath.replace(".png", ".jpg")
+ # Set the transparent parts of the PNG background to white
+ #png_image[np.where((png_image == [0, 0, 0, 0]).all(axis=2))] = [255, 255, 255, 255]
+ png_image = transparence2white(png_image)
+ # Resize to 512*512
+ png_image = cv2.resize(png_image, (512, 512))
+ cv2.imwrite(filepath, png_image)
+
+
+ outimage_path = 
draw_tools.generate_style_image(filepath, dataroot, output_dir)
+ outimage_path = outimage_path.split('/')[-1]
+ # return {
+ # "sketch_image_url": "./robot_data/output/"+outimage_path,
+ # "seq_data_file": None
+ # }
+ # outimage_path = "robot_data/output/1714032527749_fake.png"
+ # print(data)
+ generate_simple_order_line("pretrain_rough_sketches", outimage_path, 1)
+
+ prx = outimage_path.split('.')[0]
+ # out_png_image = os.path.join("robot_data/sampling/rough_sketches__pretrain_rough_sketches/", f"{prx}_0_pred.png")
+ out_png_image = os.path.join("robot_data/contour_images/", f"{prx}.png")
+ seq_data_file = os.path.join("robot_data/sampling/rough_sketches__pretrain_rough_sketches/seq_data/", f"{prx}_0.npz")
+ # strokes_data, init_cursors, image_size, _, _ = decode_npz_file(seq_data_file)
+ contours_list = draw_tools.getContourList_v2(seq_data_file)
+ contours_list = draw_tools.sortContoursList(contours_list)
+ # Hyperparameters: the third argument dilates the neighborhood (tunes line sparsity); curves whose overlap or nearness exceeds the fourth argument are removed; the fifth is the shortest contour length kept.
+ contours_list = draw_tools.remove_overlap_and_near_contours(contours_list, (512, 512), 3, 0.9, 5)
+ # Render a contour-line image and save it
+ contour_image = draw_tools.drawContours(contours_list, (512, 512,3))
+ # util.mkdirs('robot_data/contour_images')
+ # Smoothing and sampling
+ #contours_lists = draw_tools.sample_and_smooth_contours(contours_list, 10)
+ # prx = seq_data_file.split('/')[-1].split('.')[0]
+ cv2.imwrite(f"robot_data/contour_images/{prx}.png", contour_image)
+ # prx = prx.split('_')[0:1]
+ draw_tools.save_contour_points(contours_list, f"robot_data/contour_points/{prx}_contour_points.txt")
+ return {
+ "sketch_image_url": out_png_image,
+ "seq_data_file": seq_data_file
+ }
+
+@app.route('/drawing', methods=['GET', 'POST'])
+def drawing():
+ seq_data_file = request.form.get("seq_data_file")
+ print("seq_data_file:", seq_data_file)
+ # TODO: temporary code that forcibly rewrites the seq_data path into the contour_points path
+ # seq_data_file = robot_data/sampling/rough_sketches__pretrain_rough_sketches/seq_data/{prx}_0.npz
+ # becomes contours_path f"robot_data/contour_points/{prx}_contour_points.txt"
+ prx = seq_data_file.split('/')[-1].split('_')[:-1]
+ prx = "_".join(prx)
+ contours_list_path = f"./robot_data/contour_points/{prx}_contour_points.txt"
+ print(contours_list_path)
+ scp_transfer('192.168.253.95', 22, "root", "root", contours_list_path, "/home/robot/Work/system/bspline.txt")
+ return "OK"
+
+@app.route('/')
+def hello():
+ return "hello"
+
+
+@app.route('/files/<path:filename>')
+def serve_file(filename):
+ return send_from_directory('', filename)
+
+if __name__ == '__main__':
+ warnings.filterwarnings("ignore", category=FutureWarning)
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--sample', '-s', type=int, default=1, help="The number of outputs.")
+ parser.add_argument('--name', '-n', type=str, default="", help="The name of the image.")
+ args = parser.parse_args()
+ if args.name == "":
+ app = CameraApp() # Create the CameraApp object and start the capture program
+ # # # get the image name
+ # # # image_name = "./robot_data/input/1714032527749.jpg"
+ # # # image = cv2.imread(image_name, cv2.IMREAD_COLOR)
+ image = app.last_photo
+ image_name = app.last_photo_name
+ else:
+ image_name = args.name
+
+ filepath = image_name
+ outimage_path = draw_tools.generate_style_image(filepath, dataroot, output_dir)
+ outimage_path = outimage_path.split('/')[-1]
+ # outimage_path = "robot_data/output/1714032527749_fake.png"
+ # print(data)
+ generate_simple_order_line("pretrain_rough_sketches", outimage_path, 1)
+ prx = outimage_path.split('.')[0]
+ out_png_image = 
os.path.join("robot_data/sampling/rough_sketches__pretrain_rough_sketches/", f"{prx}_0_pred.png")
+ seq_data_file = os.path.join("robot_data/sampling/rough_sketches__pretrain_rough_sketches/seq_data/", f"{prx}_0.npz")
+ # strokes_data, init_cursors, image_size, _, _ = decode_npz_file(seq_data_file)
+ contours_list = draw_tools.getContourList_v2(seq_data_file)
+ cv2.imshow("origin contours", draw_tools.drawContours(contours_list, (512, 512,3)))
+ contours_list = draw_tools.sortContoursList(contours_list)
+ cv2.imshow("sorted contours", draw_tools.drawContours(contours_list, (512, 512,3)))
+ # Hyperparameters: a neighborhood dilation of 4 tunes line sparsity; curves whose overlap or nearness exceeds 0.7 are removed; 10 is the shortest contour length kept.
+ contours_list = draw_tools.remove_overlap_and_near_contours(contours_list, (512, 512), 4, 0.7, 10)
+ cv2.imshow("remove overlap contours", draw_tools.drawContours(contours_list, (512, 512,3)))
+ # Smoothing and sampling
+ #contours_lists = draw_tools.sample_and_smooth_contours(contours_list, 10)
+ # simple_image = cv2.imread(out_png_image)
+ # contours_list = draw_tools.getContourList(simple_image, 4, 100, 1)
+ draw_tools.save_contour_points(contours_list, f"robot_data/contour_points/{prx}_contour_points.txt")
+ cv2.waitKey(0)
+ # return "OK"
+ # print("image_name:", image_name)
+ #image_name = "robot_data/input/1714032527749.jpg"
+ # # Generate the style image
+ # import uvicorn
+ # default_bind_host = "0.0.0.0"
+ # uvicorn.run(app, host=default_bind_host, port=8002)
+
+
diff --git a/robot_painting/qmupd_vs/model_common_test.py b/robot_painting/qmupd_vs/model_common_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff5c40dfdf03363b9d14515fe96e5ed9bbd15ce2
--- /dev/null
+++ b/robot_painting/qmupd_vs/model_common_test.py
@@ -0,0 +1,607 @@
+import rnn
+import tensorflow as tf
+
+from subnet_tf_utils import generative_cnn_encoder, generative_cnn_encoder_deeper, generative_cnn_encoder_deeper13, \
+ generative_cnn_c3_encoder, generative_cnn_c3_encoder_deeper, generative_cnn_c3_encoder_deeper13, \
+ generative_cnn_c3_encoder_combine33, generative_cnn_c3_encoder_combine43, \
+ generative_cnn_c3_encoder_combine53, generative_cnn_c3_encoder_combineFC, \
+ generative_cnn_c3_encoder_deeper13_attn
+
+
+class DiffPastingV3(object):
+ def __init__(self, raster_size):
+ self.patch_canvas = tf.compat.v1.placeholder(dtype=tf.float32,
+ shape=(None, None, 1)) # (raster_size, raster_size, 1), [0.0-BG, 1.0-stroke]
+ self.cursor_pos_a = tf.compat.v1.placeholder(dtype=tf.float32, shape=(2)) # (2), float32, in large size
+ self.image_size_a = tf.compat.v1.placeholder(dtype=tf.int32, shape=()) # ()
+ self.window_size_a = tf.compat.v1.placeholder(dtype=tf.float32, shape=()) # (), float32, with grad
+ self.raster_size_a = float(raster_size)
+
+ self.pasted_image = self.image_pasting_sampling_v3()
+ # (image_size, image_size, 1), [0.0-BG, 1.0-stroke]
+
+ def image_pasting_sampling_v3(self):
+ padding_size = tf.cast(tf.ceil(self.window_size_a / 2.0), tf.int32)
+
+ x1y1_a = self.cursor_pos_a - self.window_size_a / 2.0 # (2), float32
+ x2y2_a = self.cursor_pos_a + self.window_size_a / 2.0 # (2), float32
+
+ x1y1_a_floor = tf.floor(x1y1_a) # (2)
+ x2y2_a_ceil = tf.ceil(x2y2_a) # (2)
+
+ cursor_pos_b_oricoord = (x1y1_a_floor + x2y2_a_ceil) / 2.0 # (2)
+ cursor_pos_b = (cursor_pos_b_oricoord - x1y1_a) / self.window_size_a * self.raster_size_a # (2)
+ raster_size_b = (x2y2_a_ceil - x1y1_a_floor) # (x, y)
+ image_size_b = self.raster_size_a
+ window_size_b = self.raster_size_a * (raster_size_b / self.window_size_a) # (x, y)
+
+ cursor_b_x, cursor_b_y = tf.split(cursor_pos_b, 
2, axis=-1) # (1)
+
+ y1_b = cursor_b_y - (window_size_b[1] - 1.) / 2.
+ x1_b = cursor_b_x - (window_size_b[0] - 1.) / 2.
+ y2_b = y1_b + (window_size_b[1] - 1.)
+ x2_b = x1_b + (window_size_b[0] - 1.)
+ boxes_b = tf.concat([y1_b, x1_b, y2_b, x2_b], axis=-1) # (4)
+ boxes_b = boxes_b / tf.cast(image_size_b - 1, tf.float32) # with grad to window_size_a
+
+ box_ind_b = tf.ones((1), dtype=tf.int32) # (1)
+ box_ind_b = tf.cumsum(box_ind_b) - 1
+
+ patch_canvas = tf.expand_dims(self.patch_canvas,
+ axis=0) # (1, raster_size, raster_size, 1), [0.0-BG, 1.0-stroke]
+ boxes_b = tf.expand_dims(boxes_b, axis=0) # (1, 4)
+
+ valid_canvas = tf.image.crop_and_resize(patch_canvas, boxes_b, box_ind_b,
+ crop_size=[raster_size_b[1], raster_size_b[0]])
+ valid_canvas = valid_canvas[0] # (raster_size_b, raster_size_b, 1)
+
+ pad_up = tf.cast(x1y1_a_floor[1], tf.int32) + padding_size
+ pad_down = self.image_size_a + padding_size - tf.cast(x2y2_a_ceil[1], tf.int32)
+ pad_left = tf.cast(x1y1_a_floor[0], tf.int32) + padding_size
+ pad_right = self.image_size_a + padding_size - tf.cast(x2y2_a_ceil[0], tf.int32)
+
+ paddings = [[pad_up, pad_down],
+ [pad_left, pad_right],
+ [0, 0]]
+ pad_img = tf.pad(valid_canvas, paddings=paddings, mode='CONSTANT',
+ constant_values=0.0) # (H_p, W_p, 1), [0.0-BG, 1.0-stroke]
+
+ pasted_image = pad_img[padding_size: padding_size + self.image_size_a,
+ padding_size: padding_size + self.image_size_a, :]
+ # (image_size, image_size, 1), [0.0-BG, 1.0-stroke]
+ return pasted_image
+
+
+class VirtualSketchingModel(object):
+ def __init__(self, hps, gpu_mode=True, reuse=False):
+ """Initializer for the model.
+
+ Args:
+ hps: a HParams object containing model hyperparameters
+ gpu_mode: a boolean that when True, uses GPU mode.
+ reuse: a boolean that when true, attempts to reuse variables.
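+
+ Note: hps.model_mode must be one of 'train', 'eval', 'eval_sample'
+ or 'sample' (asserted immediately below).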
+ """ + self.hps = hps + assert hps.model_mode in ['train', 'eval', 'eval_sample', 'sample'] + # with tf.variable_scope('SCC', reuse=reuse): + if not gpu_mode: + with tf.device('/cpu:0'): + print('Model using cpu.') + self.build_model() + else: + print('-' * 100) + print('model_mode:', hps.model_mode) + print('Model using gpu.') + self.build_model() + + def build_model(self): + """Define model architecture.""" + self.config_model() + + initial_state = self.get_decoder_inputs() + self.initial_state = initial_state + + ## use pred as the prev points + print(self.image_size) + other_params, pen_ras, final_state = self.get_points_and_raster_image(self.image_size) + + # other_params: (N * max_seq_len, 6) + # pen_ras: (N * max_seq_len, 2), after softmax + + self.other_params = other_params # (N * max_seq_len, 6) + self.pen_ras = pen_ras # (N * max_seq_len, 2), after softmax + self.final_state = final_state + + if not self.hps.use_softargmax: + pen_state_soft = pen_ras[:, 1:2] # (N * max_seq_len, 1) + else: + pen_state_soft = self.differentiable_argmax(pen_ras, self.hps.soft_beta) # (N * max_seq_len, 1) + + pred_params = tf.concat([pen_state_soft, other_params], axis=1) # (N * max_seq_len, 7) + self.pred_params = tf.reshape(pred_params, shape=[-1, self.hps.max_seq_len, 7]) # (N, max_seq_len, 7) + # pred_params: (N, max_seq_len, 7) + + def config_model(self): + if self.hps.model_mode == 'train': + self.global_step = tf.Variable(0, name='global_step', trainable=False) + + if self.hps.dec_model == 'lstm': + dec_cell_fn = rnn.LSTMCell + elif self.hps.dec_model == 'layer_norm': + dec_cell_fn = rnn.LayerNormLSTMCell + elif self.hps.dec_model == 'hyper': + dec_cell_fn = rnn.HyperLSTMCell + else: + assert False, 'please choose a respectable cell' + + use_recurrent_dropout = self.hps.use_recurrent_dropout + use_input_dropout = self.hps.use_input_dropout + use_output_dropout = self.hps.use_output_dropout + + dec_cell = dec_cell_fn( + self.hps.dec_rnn_size, + use_recurrent_dropout=use_recurrent_dropout, + dropout_keep_prob=self.hps.recurrent_dropout_prob) + + # dropout: + # print('Input dropout mode = %s.' % use_input_dropout) + # print('Output dropout mode = %s.' % use_output_dropout) + # print('Recurrent dropout mode = %s.' % use_recurrent_dropout) + if use_input_dropout: + print('Dropout to input w/ keep_prob = %4.4f.' % self.hps.input_dropout_prob) + dec_cell = tf.contrib.rnn.DropoutWrapper( + dec_cell, input_keep_prob=self.hps.input_dropout_prob) + if use_output_dropout: + print('Dropout to output w/ keep_prob = %4.4f.' 
% self.hps.output_dropout_prob) + dec_cell = tf.contrib.rnn.DropoutWrapper( + dec_cell, output_keep_prob=self.hps.output_dropout_prob) + self.dec_cell = dec_cell + + self.input_photo = tf.compat.v1.placeholder(dtype=tf.float32, + shape=[self.hps.batch_size, None, None, self.hps.input_channel]) # [0.0-stroke, 1.0-BG] + self.init_cursor = tf.compat.v1.placeholder( + dtype=tf.float32, + shape=[self.hps.batch_size, 1, 2]) # (N, 1, 2), in size [0.0, 1.0) + self.init_width = tf.compat.v1.placeholder( + dtype=tf.float32, + shape=[self.hps.batch_size]) # (1), in [0.0, 1.0] + self.init_scaling = tf.compat.v1.placeholder( + dtype=tf.float32, + shape=[self.hps.batch_size]) # (N), in [0.0, 1.0] + self.init_window_size = tf.compat.v1.placeholder( + dtype=tf.float32, + shape=[self.hps.batch_size]) # (N) + self.image_size = tf.compat.v1.placeholder(dtype=tf.int32, shape=()) # () + + ########################### + + def normalize_image_m1to1(self, in_img_0to1): + norm_img_m1to1 = tf.multiply(in_img_0to1, 2.0) + norm_img_m1to1 = tf.subtract(norm_img_m1to1, 1.0) + return norm_img_m1to1 + + def add_coords(self, input_tensor): + batch_size_tensor = tf.shape(input_tensor)[0] # get N size + + xx_ones = tf.ones([batch_size_tensor, self.hps.raster_size], dtype=tf.int32) # e.g. (N, raster_size) + xx_ones = tf.expand_dims(xx_ones, -1) # e.g. (N, raster_size, 1) + xx_range = tf.tile(tf.expand_dims(tf.range(self.hps.raster_size), 0), + [batch_size_tensor, 1]) # e.g. (N, raster_size) + xx_range = tf.expand_dims(xx_range, 1) # e.g. (N, 1, raster_size) + + xx_channel = tf.matmul(xx_ones, xx_range) # e.g. (N, raster_size, raster_size) + xx_channel = tf.expand_dims(xx_channel, -1) # e.g. (N, raster_size, raster_size, 1) + + yy_ones = tf.ones([batch_size_tensor, self.hps.raster_size], dtype=tf.int32) # e.g. (N, raster_size) + yy_ones = tf.expand_dims(yy_ones, 1) # e.g. (N, 1, raster_size) + yy_range = tf.tile(tf.expand_dims(tf.range(self.hps.raster_size), 0), + [batch_size_tensor, 1]) # (N, raster_size) + yy_range = tf.expand_dims(yy_range, -1) # e.g. (N, raster_size, 1) + + yy_channel = tf.matmul(yy_range, yy_ones) # e.g. (N, raster_size, raster_size) + yy_channel = tf.expand_dims(yy_channel, -1) # e.g. (N, raster_size, raster_size, 1) + + xx_channel = tf.cast(xx_channel, 'float32') / (self.hps.raster_size - 1) + yy_channel = tf.cast(yy_channel, 'float32') / (self.hps.raster_size - 1) + # xx_channel = xx_channel * 2 - 1 # [-1, 1] + # yy_channel = yy_channel * 2 - 1 + + ret = tf.concat([ + input_tensor, + xx_channel, + yy_channel, + ], axis=-1) # e.g. 
(N, raster_size, raster_size, 4) + + return ret + + def build_combined_encoder(self, patch_canvas, patch_photo, entire_canvas, entire_photo, cursor_pos, + image_size, window_size): + """ + :param patch_canvas: (N, raster_size, raster_size, 1), [-1.0-stroke, 1.0-BG] + :param patch_photo: (N, raster_size, raster_size, 1/3), [-1.0-stroke, 1.0-BG] + :param entire_canvas: (N, image_size, image_size, 1), [0.0-stroke, 1.0-BG] + :param entire_photo: (N, image_size, image_size, 1/3), [0.0-stroke, 1.0-BG] + :param cursor_pos: (N, 1, 2), in size [0.0, 1.0) + :param window_size: (N, 1, 1), float, in large size + :return: + """ + if self.hps.resize_method == 'BILINEAR': + resize_method = tf.image.ResizeMethod.BILINEAR + elif self.hps.resize_method == 'NEAREST_NEIGHBOR': + resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR + elif self.hps.resize_method == 'BICUBIC': + resize_method = tf.image.ResizeMethod.BICUBIC + elif self.hps.resize_method == 'AREA': + resize_method = tf.image.ResizeMethod.AREA + else: + raise Exception('unknown resize_method', self.hps.resize_method) + + patch_photo = tf.stop_gradient(patch_photo) + patch_canvas = tf.stop_gradient(patch_canvas) + cursor_pos = tf.stop_gradient(cursor_pos) + window_size = tf.stop_gradient(window_size) + + entire_photo_small = tf.stop_gradient(tf.image.resize_images(entire_photo, + (self.hps.raster_size, self.hps.raster_size), + method=resize_method)) + entire_canvas_small = tf.stop_gradient(tf.image.resize_images(entire_canvas, + (self.hps.raster_size, self.hps.raster_size), + method=resize_method)) + entire_photo_small = self.normalize_image_m1to1(entire_photo_small) # [-1.0-stroke, 1.0-BG] + entire_canvas_small = self.normalize_image_m1to1(entire_canvas_small) # [-1.0-stroke, 1.0-BG] + + if self.hps.encode_cursor_type == 'value': + cursor_pos_norm = tf.expand_dims(cursor_pos, axis=1) # (N, 1, 1, 2) + cursor_pos_norm = tf.tile(cursor_pos_norm, [1, self.hps.raster_size, self.hps.raster_size, 1]) + cursor_info = cursor_pos_norm + else: + raise Exception('Unknown encode_cursor_type', self.hps.encode_cursor_type) + + batch_input_combined = tf.concat([patch_photo, patch_canvas, entire_photo_small, entire_canvas_small, cursor_info], + axis=-1) # [N, raster_size, raster_size, 6/10] + batch_input_local = tf.concat([patch_photo, patch_canvas], axis=-1) # [N, raster_size, raster_size, 2/4] + batch_input_global = tf.concat([entire_photo_small, entire_canvas_small, cursor_info], + axis=-1) # [N, raster_size, raster_size, 4/6] + + if self.hps.model_mode == 'train': + is_training = True + dropout_keep_prob = self.hps.pix_drop_kp + else: + is_training = False + dropout_keep_prob = 1.0 + + if self.hps.add_coordconv: + batch_input_combined = self.add_coords(batch_input_combined) # (N, in_H, in_W, in_dim + 2) + batch_input_local = self.add_coords(batch_input_local) # (N, in_H, in_W, in_dim + 2) + batch_input_global = self.add_coords(batch_input_global) # (N, in_H, in_W, in_dim + 2) + + if 'combine' in self.hps.encoder_type: + if self.hps.encoder_type == 'combine33': + image_embedding, _ = generative_cnn_c3_encoder_combine33(batch_input_local, batch_input_global, + is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'combine43': + image_embedding, _ = generative_cnn_c3_encoder_combine43(batch_input_local, batch_input_global, + is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'combine53': + image_embedding, _ = generative_cnn_c3_encoder_combine53(batch_input_local, batch_input_global, + is_training, dropout_keep_prob) 
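+ # Note (added comment): every 'combine*' encoder fuses two streams, batch_input_local
+ # (the patch around the cursor) and batch_input_global (the downscaled full photo and
+ # canvas plus the tiled cursor map); the suffix only changes the fusion depth, and
+ # combineFC yields a 256-d embedding while the other variants yield 128-d.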
# (N, 128) + elif self.hps.encoder_type == 'combineFC': + image_embedding, _ = generative_cnn_c3_encoder_combineFC(batch_input_local, batch_input_global, + is_training, dropout_keep_prob) # (N, 256) + else: + raise Exception('Unknown encoder_type', self.hps.encoder_type) + else: + with tf.variable_scope('Combined_Encoder', reuse=tf.AUTO_REUSE): + if self.hps.encoder_type == 'conv10': + image_embedding, _ = generative_cnn_encoder(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'conv10_deep': + image_embedding, _ = generative_cnn_encoder_deeper(batch_input_combined, is_training, dropout_keep_prob) # (N, 512) + elif self.hps.encoder_type == 'conv13': + image_embedding, _ = generative_cnn_encoder_deeper13(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'conv10_c3': + image_embedding, _ = generative_cnn_c3_encoder(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'conv10_deep_c3': + image_embedding, _ = generative_cnn_c3_encoder_deeper(batch_input_combined, is_training, dropout_keep_prob) # (N, 512) + elif self.hps.encoder_type == 'conv13_c3': + image_embedding, _ = generative_cnn_c3_encoder_deeper13(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'conv13_c3_attn': + image_embedding, _ = generative_cnn_c3_encoder_deeper13_attn(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + else: + raise Exception('Unknown encoder_type', self.hps.encoder_type) + return image_embedding + + def build_seq_decoder(self, dec_cell, actual_input_x, initial_state): + rnn_output, last_state = self.rnn_decoder(dec_cell, initial_state, actual_input_x) + rnn_output_flat = tf.reshape(rnn_output, [-1, self.hps.dec_rnn_size]) + + pen_n_out = 2 + params_n_out = 6 + + with tf.variable_scope('DEC_RNN_out_pen', reuse=tf.AUTO_REUSE): + output_w_pen = tf.get_variable('output_w', [self.hps.dec_rnn_size, pen_n_out]) + output_b_pen = tf.get_variable('output_b', [pen_n_out], initializer=tf.constant_initializer(0.0)) + output_pen = tf.nn.xw_plus_b(rnn_output_flat, output_w_pen, output_b_pen) # (N, pen_n_out) + + with tf.variable_scope('DEC_RNN_out_params', reuse=tf.AUTO_REUSE): + output_w_params = tf.get_variable('output_w', [self.hps.dec_rnn_size, params_n_out]) + output_b_params = tf.get_variable('output_b', [params_n_out], initializer=tf.constant_initializer(0.0)) + output_params = tf.nn.xw_plus_b(rnn_output_flat, output_w_params, output_b_params) # (N, params_n_out) + + output = tf.concat([output_pen, output_params], axis=1) # (N, n_out) + + return output, last_state + + def get_mixture_coef(self, outputs): + z = outputs + z_pen_logits = z[:, 0:2] # (N, 2), pen states + z_other_params_logits = z[:, 2:] # (N, 6) + + z_pen = tf.nn.softmax(z_pen_logits) # (N, 2) + if self.hps.position_format == 'abs': + x1y1 = tf.nn.sigmoid(z_other_params_logits[:, 0:2]) # (N, 2) + x2y2 = tf.tanh(z_other_params_logits[:, 2:4]) # (N, 2) + widths = tf.nn.sigmoid(z_other_params_logits[:, 4:5]) # (N, 1) + widths = tf.add(tf.multiply(widths, 1.0 - self.hps.min_width), self.hps.min_width) + scaling = tf.nn.sigmoid(z_other_params_logits[:, 5:6]) * self.hps.max_scaling # (N, 1), [0.0, max_scaling] + # scaling = tf.add(tf.multiply(scaling, (self.hps.max_scaling - self.hps.min_scaling) / self.hps.max_scaling), + # self.hps.min_scaling) + z_other_params = tf.concat([x1y1, x2y2, widths, scaling], axis=-1) # (N, 6) + else: # "rel" + raise 
Exception('Unknown position_format', self.hps.position_format) + + r = [z_other_params, z_pen] + return r + + ########################### + + def get_decoder_inputs(self): + initial_state = self.dec_cell.zero_state(batch_size=self.hps.batch_size, dtype=tf.float32) + return initial_state + + def rnn_decoder(self, dec_cell, initial_state, actual_input_x): + with tf.variable_scope("RNN_DEC", reuse=tf.AUTO_REUSE): + output, last_state = tf.nn.dynamic_rnn( + dec_cell, + actual_input_x, + initial_state=initial_state, + time_major=False, + swap_memory=True, + dtype=tf.float32) + return output, last_state + + ########################### + + def image_padding(self, ori_image, window_size, pad_value): + """ + Pad with (bg) + :param ori_image: + :return: + """ + paddings = [[0, 0], + [window_size // 2, window_size // 2], + [window_size // 2, window_size // 2], + [0, 0]] + pad_img = tf.pad(ori_image, paddings=paddings, mode='CONSTANT', constant_values=pad_value) # (N, H_p, W_p, k) + return pad_img + + def image_cropping_fn(self, fn_inputs): + """ + crop the patch + :return: + """ + index_offset = self.hps.input_channel - 1 + input_image = fn_inputs[:, :, 0:2 + index_offset] # (image_size, image_size, -), [0.0-BG, 1.0-stroke] + cursor_pos = fn_inputs[0, 0, 2 + index_offset:4 + index_offset] # (2), in [0.0, 1.0) + image_size = fn_inputs[0, 0, 4 + index_offset] # (), float32 + window_size = tf.cast(fn_inputs[0, 0, 5 + index_offset], tf.int32) # () + + input_img_reshape = tf.expand_dims(input_image, axis=0) + pad_img = self.image_padding(input_img_reshape, window_size, pad_value=0.0) + + cursor_pos = tf.cast(tf.round(tf.multiply(cursor_pos, image_size)), dtype=tf.int32) + x0, x1 = cursor_pos[0], cursor_pos[0] + window_size # () + y0, y1 = cursor_pos[1], cursor_pos[1] + window_size # () + patch_image = pad_img[:, y0:y1, x0:x1, :] # (1, window_size, window_size, 2/4) + + # resize to raster_size + patch_image_scaled = tf.image.resize_images(patch_image, (self.hps.raster_size, self.hps.raster_size), + method=tf.image.ResizeMethod.AREA) + patch_image_scaled = tf.squeeze(patch_image_scaled, axis=0) + # patch_canvas_scaled: (raster_size, raster_size, 2/4), [0.0-BG, 1.0-stroke] + + return patch_image_scaled + + def image_cropping(self, cursor_position, input_img, image_size, window_sizes): + """ + :param cursor_position: (N, 1, 2), float type, in size [0.0, 1.0) + :param input_img: (N, image_size, image_size, 2/4), [0.0-BG, 1.0-stroke] + :param window_sizes: (N, 1, 1), float32, with grad + """ + input_img_ = input_img + window_sizes_non_grad = tf.stop_gradient(tf.round(window_sizes)) # (N, 1, 1), no grad + + cursor_position_ = tf.reshape(cursor_position, (-1, 1, 1, 2)) # (N, 1, 1, 2) + cursor_position_ = tf.tile(cursor_position_, [1, image_size, image_size, 1]) # (N, image_size, image_size, 2) + + image_size_ = tf.reshape(tf.cast(image_size, tf.float32), (1, 1, 1, 1)) # (1, 1, 1, 1) + image_size_ = tf.tile(image_size_, [self.hps.batch_size, image_size, image_size, 1]) + + window_sizes_ = tf.reshape(window_sizes_non_grad, (-1, 1, 1, 1)) # (N, 1, 1, 1) + window_sizes_ = tf.tile(window_sizes_, [1, image_size, image_size, 1]) # (N, image_size, image_size, 1) + + fn_inputs = tf.concat([input_img_, cursor_position_, image_size_, window_sizes_], + axis=-1) # (N, image_size, image_size, 2/4 + 4) + curr_patch_imgs = tf.map_fn(self.image_cropping_fn, fn_inputs, parallel_iterations=32) # (N, raster_size, raster_size, -) + return curr_patch_imgs + + def image_cropping_v3(self, cursor_position, input_img, image_size, 
window_sizes): + """ + :param cursor_position: (N, 1, 2), float type, in size [0.0, 1.0) + :param input_img: (N, image_size, image_size, k), [0.0-BG, 1.0-stroke] + :param window_sizes: (N, 1, 1), float32, with grad + """ + window_sizes_non_grad = tf.stop_gradient(window_sizes) # (N, 1, 1), no grad + + cursor_pos = tf.multiply(cursor_position, tf.cast(image_size, tf.float32)) + print(cursor_pos) + cursor_x, cursor_y = tf.split(cursor_pos, 2, axis=-1) # (N, 1, 1) + + y1 = cursor_y - (window_sizes_non_grad - 1.0) / 2 + x1 = cursor_x - (window_sizes_non_grad - 1.0) / 2 + y2 = y1 + (window_sizes_non_grad - 1.0) + x2 = x1 + (window_sizes_non_grad - 1.0) + boxes = tf.concat([y1, x1, y2, x2], axis=-1) # (N, 1, 4) + boxes = tf.squeeze(boxes, axis=1) # (N, 4) + boxes = boxes / tf.cast(image_size - 1, tf.float32) + + box_ind = tf.ones_like(cursor_x)[:, 0, 0] # (N) + box_ind = tf.cast(box_ind, dtype=tf.int32) + box_ind = tf.cumsum(box_ind) - 1 + + curr_patch_imgs = tf.image.crop_and_resize(input_img, boxes, box_ind, + crop_size=[self.hps.raster_size, self.hps.raster_size]) + # (N, raster_size, raster_size, k), [0.0-BG, 1.0-stroke] + return curr_patch_imgs + + def get_points_and_raster_image(self, image_size): + ## generate the other_params and pen_ras and raster image for raster loss + prev_state = self.initial_state # (N, dec_rnn_size * 3) + + prev_width = self.init_width # (N) + prev_width = tf.expand_dims(tf.expand_dims(prev_width, axis=-1), axis=-1) # (N, 1, 1) + + prev_scaling = self.init_scaling # (N) + prev_scaling = tf.reshape(prev_scaling, (-1, 1, 1)) # (N, 1, 1) + + prev_window_size = self.init_window_size # (N) + prev_window_size = tf.reshape(prev_window_size, (-1, 1, 1)) # (N, 1, 1) + + cursor_position_temp = self.init_cursor + self.cursor_position = cursor_position_temp # (N, 1, 2), in size [0.0, 1.0) + cursor_position_loop = self.cursor_position + + other_params_list = [] + pen_ras_list = [] + + curr_canvas_soft = tf.zeros_like(self.input_photo[:, :, :, 0]) # (N, image_size, image_size), [0.0-BG, 1.0-stroke] + curr_canvas_hard = tf.zeros_like(curr_canvas_soft) # [0.0-BG, 1.0-stroke] + + #### sampling part - start #### + self.curr_canvas_hard = curr_canvas_hard + + if self.hps.cropping_type == 'v3': + cropping_func = self.image_cropping_v3 + # elif self.hps.cropping_type == 'v2': + # cropping_func = self.image_cropping + else: + raise Exception('Unknown cropping_type', self.hps.cropping_type) + + for time_i in range(self.hps.max_seq_len): + cursor_position_non_grad = tf.stop_gradient(cursor_position_loop) # (N, 1, 2), in size [0.0, 1.0) + + curr_window_size = tf.multiply(prev_scaling, tf.stop_gradient(prev_window_size)) # float, with grad + curr_window_size = tf.maximum(curr_window_size, tf.cast(self.hps.min_window_size, tf.float32)) + curr_window_size = tf.minimum(curr_window_size, tf.cast(image_size, tf.float32)) + + ## patch-level encoding + # Here, we make the gradients from canvas_z to curr_canvas_hard be None to avoid recurrent gradient propagation. 
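+ # Note (added comment): the stop_gradient below makes each decoding step treat the
+ # accumulated canvas as a constant input, so backpropagation never unrolls through
+ # the pasting ops of earlier steps and memory stays bounded over max_seq_len steps.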
+ curr_canvas_hard_non_grad = tf.stop_gradient(self.curr_canvas_hard) + curr_canvas_hard_non_grad = tf.expand_dims(curr_canvas_hard_non_grad, axis=-1) + + # input_photo: (N, image_size, image_size, 1/3), [0.0-stroke, 1.0-BG] + crop_inputs = tf.concat([1.0 - self.input_photo, curr_canvas_hard_non_grad], axis=-1) # (N, H_p, W_p, 1+1) + + cropped_outputs = cropping_func(cursor_position_non_grad, crop_inputs, image_size, curr_window_size) + index_offset = self.hps.input_channel - 1 + curr_patch_inputs = cropped_outputs[:, :, :, 0:1 + index_offset] # [0.0-BG, 1.0-stroke] + curr_patch_canvas_hard_non_grad = cropped_outputs[:, :, :, 1 + index_offset:2 + index_offset] + # (N, raster_size, raster_size, 1/3), [0.0-BG, 1.0-stroke] + + curr_patch_inputs = 1.0 - curr_patch_inputs # [0.0-stroke, 1.0-BG] + curr_patch_inputs = self.normalize_image_m1to1(curr_patch_inputs) + # (N, raster_size, raster_size, 1/3), [-1.0-stroke, 1.0-BG] + + # Normalizing image + curr_patch_canvas_hard_non_grad = 1.0 - curr_patch_canvas_hard_non_grad # [0.0-stroke, 1.0-BG] + curr_patch_canvas_hard_non_grad = self.normalize_image_m1to1(curr_patch_canvas_hard_non_grad) # [-1.0-stroke, 1.0-BG] + + ## image-level encoding + combined_z = self.build_combined_encoder( + curr_patch_canvas_hard_non_grad, + curr_patch_inputs, + 1.0 - curr_canvas_hard_non_grad, + self.input_photo, + cursor_position_non_grad, + image_size, + curr_window_size) # (N, z_size) + combined_z = tf.expand_dims(combined_z, axis=1) # (N, 1, z_size) + + curr_window_size_top_side_norm_non_grad = \ + tf.stop_gradient(curr_window_size / tf.cast(image_size, tf.float32)) + curr_window_size_bottom_side_norm_non_grad = \ + tf.stop_gradient(curr_window_size / tf.cast(self.hps.min_window_size, tf.float32)) + if not self.hps.concat_win_size: + combined_z = tf.concat([tf.stop_gradient(prev_width), combined_z], 2) # (N, 1, 2+z_size) + else: + combined_z = tf.concat([tf.stop_gradient(prev_width), + curr_window_size_top_side_norm_non_grad, + curr_window_size_bottom_side_norm_non_grad, + combined_z], + 2) # (N, 1, 2+z_size) + + if self.hps.concat_cursor: + prev_input_x = tf.concat([cursor_position_non_grad, combined_z], 2) # (N, 1, 2+2+z_size) + else: + prev_input_x = combined_z # (N, 1, 2+z_size) + + h_output, next_state = self.build_seq_decoder(self.dec_cell, prev_input_x, prev_state) + # h_output: (N * 1, n_out), next_state: (N, dec_rnn_size * 3) + [o_other_params, o_pen_ras] = self.get_mixture_coef(h_output) + # o_other_params: (N * 1, 6) + # o_pen_ras: (N * 1, 2), after softmax + + o_other_params = tf.reshape(o_other_params, [-1, 1, 6]) # (N, 1, 6) + o_pen_ras_raw = tf.reshape(o_pen_ras, [-1, 1, 2]) # (N, 1, 2) + + other_params_list.append(o_other_params) + pen_ras_list.append(o_pen_ras_raw) + + #### sampling part - end #### + + prev_state = next_state + + other_params_ = tf.reshape(tf.concat(other_params_list, axis=1), [-1, 6]) # (N * max_seq_len, 6) + pen_ras_ = tf.reshape(tf.concat(pen_ras_list, axis=1), [-1, 2]) # (N * max_seq_len, 2) + + return other_params_, pen_ras_, prev_state + + def differentiable_argmax(self, input_pen, soft_beta): + """ + Differentiable argmax trick. + :param input_pen: (N, n_class) + :return: pen_state: (N, 1) + """ + def sign_onehot(x): + """ + :param x: (N, n_class) + :return: (N, n_class) + """ + y = tf.sign(tf.reduce_max(x, axis=-1, keepdims=True) - x) + y = (y - 1) * (-1) + return y + + def softargmax(x, beta=1e2): + """ + :param x: (N, n_class) + :param beta: 1e10 is the best. 1e2 is acceptable. 
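+ Concretely this computes sum_i(softmax(beta * x)_i * i) over class
+ indices i, which approaches argmax(x) as beta grows while staying
+ differentiable; e.g. softargmax([[0.2, 3.0]], beta=1e2) ~= 1.0.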
+ :return: (N) + """ + x_range = tf.cumsum(tf.ones_like(x), axis=1) # (N, 2) + return tf.reduce_sum(tf.nn.softmax(x * beta) * x_range, axis=1) - 1 + + ## Better to use softargmax(beta=1e2). The sign_onehot's gradient is close to zero. + # pen_onehot = sign_onehot(input_pen) # one-hot form, (N * max_seq_len, 2) + # pen_state = pen_onehot[:, 1:2] # (N * max_seq_len, 1) + pen_state = softargmax(input_pen, soft_beta) + pen_state = tf.expand_dims(pen_state, axis=1) # (N * max_seq_len, 1) + return pen_state diff --git a/robot_painting/qmupd_vs/model_common_train.py b/robot_painting/qmupd_vs/model_common_train.py new file mode 100644 index 0000000000000000000000000000000000000000..a7c22b33f45a9cbd7e69c878866cb2fb6dd81f7c --- /dev/null +++ b/robot_painting/qmupd_vs/model_common_train.py @@ -0,0 +1,1193 @@ +import rnn +import tensorflow as tf + +from subnet_tf_utils import generative_cnn_encoder, generative_cnn_encoder_deeper, generative_cnn_encoder_deeper13, \ + generative_cnn_c3_encoder, generative_cnn_c3_encoder_deeper, generative_cnn_c3_encoder_deeper13, \ + generative_cnn_c3_encoder_combine33, generative_cnn_c3_encoder_combine43, \ + generative_cnn_c3_encoder_combine53, generative_cnn_c3_encoder_combineFC, \ + generative_cnn_c3_encoder_deeper13_attn +from rasterization_utils.NeuralRenderer import NeuralRasterizorStep +from vgg_utils.VGG16 import vgg_net_slim + + +class VirtualSketchingModel(object): + def __init__(self, hps, gpu_mode=True, reuse=False): + """Initializer for the model. + + Args: + hps: a HParams object containing model hyperparameters + gpu_mode: a boolean that when True, uses GPU mode. + reuse: a boolean that when true, attemps to reuse variables. + """ + self.hps = hps + assert hps.model_mode in ['train', 'eval', 'eval_sample', 'sample'] + # with tf.variable_scope('SCC', reuse=reuse): + if not gpu_mode: + with tf.device('/cpu:0'): + print('Model using cpu.') + self.build_model() + else: + print('-' * 100) + print('model_mode:', hps.model_mode) + print('Model using gpu.') + self.build_model() + + def build_model(self): + """Define model architecture.""" + self.config_model() + + initial_state = self.get_decoder_inputs() + self.initial_state = initial_state + self.initial_state_list = tf.split(self.initial_state, self.total_loop, axis=0) + + total_loss_list = [] + ras_loss_list = [] + perc_relu_raw_list = [] + perc_relu_norm_list = [] + sn_loss_list = [] + cursor_outside_loss_list = [] + win_size_outside_loss_list = [] + early_state_loss_list = [] + + tower_grads = [] + + pred_raster_imgs_list = [] + pred_raster_imgs_rgb_list = [] + + for t_i in range(self.total_loop): + gpu_idx = t_i // self.hps.loop_per_gpu + gpu_i = self.hps.gpus[gpu_idx] + print(self.hps.model_mode, 'model, gpu:', gpu_i, ', loop:', t_i % self.hps.loop_per_gpu) + with tf.device('/gpu:%d' % gpu_i): + with tf.name_scope('GPU_%d' % gpu_i) as scope: + if t_i > 0: + tf.get_variable_scope().reuse_variables() + else: + total_loss_list.clear() + ras_loss_list.clear() + perc_relu_raw_list.clear() + perc_relu_norm_list.clear() + sn_loss_list.clear() + cursor_outside_loss_list.clear() + win_size_outside_loss_list.clear() + early_state_loss_list.clear() + tower_grads.clear() + pred_raster_imgs_list.clear() + pred_raster_imgs_rgb_list.clear() + + split_input_photo = self.input_photo_list[t_i] + split_image_size = self.image_size[t_i] + split_init_cursor = self.init_cursor_list[t_i] + split_initial_state = self.initial_state_list[t_i] + if self.hps.input_channel == 1: + split_target_sketch = split_input_photo + else: + 
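+ # Note (added comment): with 3-channel (RGB) inputs the photo itself cannot act as
+ # the reconstruction target, so a separate 1-channel target sketch is supplied.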
split_target_sketch = self.target_sketch_list[t_i] + + ## use pred as the prev points + other_params, pen_ras, final_state, pred_raster_images, pred_raster_images_rgb, \ + pos_before_max_min, win_size_before_max_min \ + = self.get_points_and_raster_image(split_initial_state, split_init_cursor, split_input_photo, + split_image_size) + # other_params: (N * max_seq_len, 6) + # pen_ras: (N * max_seq_len, 2), after softmax + # pos_before_max_min: (N, max_seq_len, 2), in image_size + # win_size_before_max_min: (N, max_seq_len, 1), in image_size + + pred_raster_imgs = 1.0 - pred_raster_images # (N, image_size, image_size), [0.0-stroke, 1.0-BG] + pred_raster_imgs_rgb = 1.0 - pred_raster_images_rgb # (N, image_size, image_size, 3) + pred_raster_imgs_list.append(pred_raster_imgs) + pred_raster_imgs_rgb_list.append(pred_raster_imgs_rgb) + + if not self.hps.use_softargmax: + pen_state_soft = pen_ras[:, 1:2] # (N * max_seq_len, 1) + else: + pen_state_soft = self.differentiable_argmax(pen_ras, self.hps.soft_beta) # (N * max_seq_len, 1) + + pred_params = tf.concat([pen_state_soft, other_params], axis=1) # (N * max_seq_len, 7) + pred_params = tf.reshape(pred_params, shape=[-1, self.hps.max_seq_len, 7]) # (N, max_seq_len, 7) + # pred_params: (N, max_seq_len, 7) + + if self.hps.model_mode == 'train' or self.hps.model_mode == 'eval': + raster_cost, sn_cost, cursor_outside_cost, winsize_outside_cost, \ + early_pen_states_cost, \ + perc_relu_loss_raw, perc_relu_loss_norm = \ + self.build_losses(split_target_sketch, pred_raster_imgs, pred_params, + pos_before_max_min, win_size_before_max_min, + split_image_size) + # perc_relu_loss_raw, perc_relu_loss_norm: (n_layers) + + ras_loss_list.append(raster_cost) + perc_relu_raw_list.append(perc_relu_loss_raw) + perc_relu_norm_list.append(perc_relu_loss_norm) + sn_loss_list.append(sn_cost) + cursor_outside_loss_list.append(cursor_outside_cost) + win_size_outside_loss_list.append(winsize_outside_cost) + early_state_loss_list.append(early_pen_states_cost) + + if self.hps.model_mode == 'train': + total_cost_split, grads_and_vars_split = self.build_training_op_split( + raster_cost, sn_cost, cursor_outside_cost, winsize_outside_cost, + early_pen_states_cost) + total_loss_list.append(total_cost_split) + tower_grads.append(grads_and_vars_split) + + self.raster_cost = tf.reduce_mean(tf.stack(ras_loss_list, axis=0)) + self.perc_relu_losses_raw = tf.reduce_mean(tf.stack(perc_relu_raw_list, axis=0), axis=0) # (n_layers) + self.perc_relu_losses_norm = tf.reduce_mean(tf.stack(perc_relu_norm_list, axis=0), axis=0) # (n_layers) + self.stroke_num_cost = tf.reduce_mean(tf.stack(sn_loss_list, axis=0)) + self.pos_outside_cost = tf.reduce_mean(tf.stack(cursor_outside_loss_list, axis=0)) + self.win_size_outside_cost = tf.reduce_mean(tf.stack(win_size_outside_loss_list, axis=0)) + self.early_pen_states_cost = tf.reduce_mean(tf.stack(early_state_loss_list, axis=0)) + self.cost = tf.reduce_mean(tf.stack(total_loss_list, axis=0)) + + self.pred_raster_imgs = tf.concat(pred_raster_imgs_list, axis=0) # (N, image_size, image_size), [0.0-stroke, 1.0-BG] + self.pred_raster_imgs_rgb = tf.concat(pred_raster_imgs_rgb_list, axis=0) # (N, image_size, image_size, 3) + + if self.hps.model_mode == 'train': + self.build_training_op(tower_grads) + + def config_model(self): + if self.hps.model_mode == 'train': + self.global_step = tf.Variable(0, name='global_step', trainable=False) + + if self.hps.dec_model == 'lstm': + dec_cell_fn = rnn.LSTMCell + elif self.hps.dec_model == 'layer_norm': + dec_cell_fn = 
rnn.LayerNormLSTMCell + elif self.hps.dec_model == 'hyper': + dec_cell_fn = rnn.HyperLSTMCell + else: + assert False, 'please choose a respectable cell' + + use_recurrent_dropout = self.hps.use_recurrent_dropout + use_input_dropout = self.hps.use_input_dropout + use_output_dropout = self.hps.use_output_dropout + + dec_cell = dec_cell_fn( + self.hps.dec_rnn_size, + use_recurrent_dropout=use_recurrent_dropout, + dropout_keep_prob=self.hps.recurrent_dropout_prob) + + # dropout: + # print('Input dropout mode = %s.' % use_input_dropout) + # print('Output dropout mode = %s.' % use_output_dropout) + # print('Recurrent dropout mode = %s.' % use_recurrent_dropout) + if use_input_dropout: + print('Dropout to input w/ keep_prob = %4.4f.' % self.hps.input_dropout_prob) + dec_cell = tf.contrib.rnn.DropoutWrapper( + dec_cell, input_keep_prob=self.hps.input_dropout_prob) + if use_output_dropout: + print('Dropout to output w/ keep_prob = %4.4f.' % self.hps.output_dropout_prob) + dec_cell = tf.contrib.rnn.DropoutWrapper( + dec_cell, output_keep_prob=self.hps.output_dropout_prob) + self.dec_cell = dec_cell + + self.total_loop = len(self.hps.gpus) * self.hps.loop_per_gpu + + self.init_cursor = tf.placeholder( + dtype=tf.float32, + shape=[self.hps.batch_size, 1, 2]) # (N, 1, 2), in size [0.0, 1.0) + self.init_width = tf.placeholder( + dtype=tf.float32, + shape=[1]) # (1), in [0.0, 1.0] + self.image_size = tf.placeholder(dtype=tf.int32, shape=(self.total_loop)) # () + + self.init_cursor_list = tf.split(self.init_cursor, self.total_loop, axis=0) + self.input_photo_list = [] + for loop_i in range(self.total_loop): + input_photo_i = tf.placeholder(dtype=tf.float32, shape=[None, None, None, self.hps.input_channel]) # [0.0-stroke, 1.0-BG] + self.input_photo_list.append(input_photo_i) + + if self.hps.input_channel == 3: + self.target_sketch_list = [] + for loop_i in range(self.total_loop): + target_sketch_i = tf.placeholder(dtype=tf.float32, shape=[None, None, None, 1]) # [0.0-stroke, 1.0-BG] + self.target_sketch_list.append(target_sketch_i) + + if self.hps.model_mode == 'train' or self.hps.model_mode == 'eval': + self.stroke_num_loss_weight = tf.Variable(0.0, trainable=False) + self.early_pen_loss_start_idx = tf.Variable(0, dtype=tf.int32, trainable=False) + self.early_pen_loss_end_idx = tf.Variable(0, dtype=tf.int32, trainable=False) + + if self.hps.model_mode == 'train': + self.perc_loss_mean_list = [] + for loop_i in range(len(self.hps.perc_loss_layers)): + relu_loss_mean = tf.Variable(0.0, trainable=False) + self.perc_loss_mean_list.append(relu_loss_mean) + self.last_step_num = tf.Variable(0.0, trainable=False) + + with tf.variable_scope('train_op', reuse=tf.AUTO_REUSE): + self.lr = tf.Variable(self.hps.learning_rate, trainable=False) + self.optimizer = tf.train.AdamOptimizer(self.lr) + + ########################### + + def normalize_image_m1to1(self, in_img_0to1): + norm_img_m1to1 = tf.multiply(in_img_0to1, 2.0) + norm_img_m1to1 = tf.subtract(norm_img_m1to1, 1.0) + return norm_img_m1to1 + + def add_coords(self, input_tensor): + batch_size_tensor = tf.shape(input_tensor)[0] # get N size + + xx_ones = tf.ones([batch_size_tensor, self.hps.raster_size], dtype=tf.int32) # e.g. (N, raster_size) + xx_ones = tf.expand_dims(xx_ones, -1) # e.g. (N, raster_size, 1) + xx_range = tf.tile(tf.expand_dims(tf.range(self.hps.raster_size), 0), + [batch_size_tensor, 1]) # e.g. (N, raster_size) + xx_range = tf.expand_dims(xx_range, 1) # e.g. (N, 1, raster_size) + + xx_channel = tf.matmul(xx_ones, xx_range) # e.g. 
(N, raster_size, raster_size) + xx_channel = tf.expand_dims(xx_channel, -1) # e.g. (N, raster_size, raster_size, 1) + + yy_ones = tf.ones([batch_size_tensor, self.hps.raster_size], dtype=tf.int32) # e.g. (N, raster_size) + yy_ones = tf.expand_dims(yy_ones, 1) # e.g. (N, 1, raster_size) + yy_range = tf.tile(tf.expand_dims(tf.range(self.hps.raster_size), 0), + [batch_size_tensor, 1]) # (N, raster_size) + yy_range = tf.expand_dims(yy_range, -1) # e.g. (N, raster_size, 1) + + yy_channel = tf.matmul(yy_range, yy_ones) # e.g. (N, raster_size, raster_size) + yy_channel = tf.expand_dims(yy_channel, -1) # e.g. (N, raster_size, raster_size, 1) + + xx_channel = tf.cast(xx_channel, 'float32') / (self.hps.raster_size - 1) + yy_channel = tf.cast(yy_channel, 'float32') / (self.hps.raster_size - 1) + # xx_channel = xx_channel * 2 - 1 # [-1, 1] + # yy_channel = yy_channel * 2 - 1 + + ret = tf.concat([ + input_tensor, + xx_channel, + yy_channel, + ], axis=-1) # e.g. (N, raster_size, raster_size, 4) + + return ret + + def build_combined_encoder(self, patch_canvas, patch_photo, entire_canvas, entire_photo, cursor_pos, + image_size, window_size): + """ + :param patch_canvas: (N, raster_size, raster_size, 1), [-1.0-stroke, 1.0-BG] + :param patch_photo: (N, raster_size, raster_size, 1/3), [-1.0-stroke, 1.0-BG] + :param entire_canvas: (N, image_size, image_size, 1), [0.0-stroke, 1.0-BG] + :param entire_photo: (N, image_size, image_size, 1/3), [0.0-stroke, 1.0-BG] + :param cursor_pos: (N, 1, 2), in size [0.0, 1.0) + :param window_size: (N, 1, 1), float, in large size + :return: + """ + if self.hps.resize_method == 'BILINEAR': + resize_method = tf.image.ResizeMethod.BILINEAR + elif self.hps.resize_method == 'NEAREST_NEIGHBOR': + resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR + elif self.hps.resize_method == 'BICUBIC': + resize_method = tf.image.ResizeMethod.BICUBIC + elif self.hps.resize_method == 'AREA': + resize_method = tf.image.ResizeMethod.AREA + else: + raise Exception('unknown resize_method', self.hps.resize_method) + + patch_photo = tf.stop_gradient(patch_photo) + patch_canvas = tf.stop_gradient(patch_canvas) + cursor_pos = tf.stop_gradient(cursor_pos) + window_size = tf.stop_gradient(window_size) + + entire_photo_small = tf.stop_gradient(tf.image.resize_images(entire_photo, + (self.hps.raster_size, self.hps.raster_size), + method=resize_method)) + entire_canvas_small = tf.stop_gradient(tf.image.resize_images(entire_canvas, + (self.hps.raster_size, self.hps.raster_size), + method=resize_method)) + entire_photo_small = self.normalize_image_m1to1(entire_photo_small) # [-1.0-stroke, 1.0-BG] + entire_canvas_small = self.normalize_image_m1to1(entire_canvas_small) # [-1.0-stroke, 1.0-BG] + + if self.hps.encode_cursor_type == 'value': + cursor_pos_norm = tf.expand_dims(cursor_pos, axis=1) # (N, 1, 1, 2) + cursor_pos_norm = tf.tile(cursor_pos_norm, [1, self.hps.raster_size, self.hps.raster_size, 1]) + cursor_info = cursor_pos_norm + else: + raise Exception('Unknown encode_cursor_type', self.hps.encode_cursor_type) + + batch_input_combined = tf.concat([patch_photo, patch_canvas, entire_photo_small, entire_canvas_small, cursor_info], + axis=-1) # [N, raster_size, raster_size, 6/10] + batch_input_local = tf.concat([patch_photo, patch_canvas], axis=-1) # [N, raster_size, raster_size, 2/4] + batch_input_global = tf.concat([entire_photo_small, entire_canvas_small, cursor_info], + axis=-1) # [N, raster_size, raster_size, 4/6] + + if self.hps.model_mode == 'train': + is_training = True + dropout_keep_prob = 
self.hps.pix_drop_kp + else: + is_training = False + dropout_keep_prob = 1.0 + + if self.hps.add_coordconv: + batch_input_combined = self.add_coords(batch_input_combined) # (N, in_H, in_W, in_dim + 2) + batch_input_local = self.add_coords(batch_input_local) # (N, in_H, in_W, in_dim + 2) + batch_input_global = self.add_coords(batch_input_global) # (N, in_H, in_W, in_dim + 2) + + if 'combine' in self.hps.encoder_type: + if self.hps.encoder_type == 'combine33': + image_embedding, _ = generative_cnn_c3_encoder_combine33(batch_input_local, batch_input_global, + is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'combine43': + image_embedding, _ = generative_cnn_c3_encoder_combine43(batch_input_local, batch_input_global, + is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'combine53': + image_embedding, _ = generative_cnn_c3_encoder_combine53(batch_input_local, batch_input_global, + is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'combineFC': + image_embedding, _ = generative_cnn_c3_encoder_combineFC(batch_input_local, batch_input_global, + is_training, dropout_keep_prob) # (N, 256) + else: + raise Exception('Unknown encoder_type', self.hps.encoder_type) + else: + with tf.variable_scope('Combined_Encoder', reuse=tf.AUTO_REUSE): + if self.hps.encoder_type == 'conv10': + image_embedding, _ = generative_cnn_encoder(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'conv10_deep': + image_embedding, _ = generative_cnn_encoder_deeper(batch_input_combined, is_training, dropout_keep_prob) # (N, 512) + elif self.hps.encoder_type == 'conv13': + image_embedding, _ = generative_cnn_encoder_deeper13(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'conv10_c3': + image_embedding, _ = generative_cnn_c3_encoder(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'conv10_deep_c3': + image_embedding, _ = generative_cnn_c3_encoder_deeper(batch_input_combined, is_training, dropout_keep_prob) # (N, 512) + elif self.hps.encoder_type == 'conv13_c3': + image_embedding, _ = generative_cnn_c3_encoder_deeper13(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + elif self.hps.encoder_type == 'conv13_c3_attn': + image_embedding, _ = generative_cnn_c3_encoder_deeper13_attn(batch_input_combined, is_training, dropout_keep_prob) # (N, 128) + else: + raise Exception('Unknown encoder_type', self.hps.encoder_type) + return image_embedding + + def build_seq_decoder(self, dec_cell, actual_input_x, initial_state): + rnn_output, last_state = self.rnn_decoder(dec_cell, initial_state, actual_input_x) + rnn_output_flat = tf.reshape(rnn_output, [-1, self.hps.dec_rnn_size]) + + pen_n_out = 2 + params_n_out = 6 + + with tf.variable_scope('DEC_RNN_out_pen', reuse=tf.AUTO_REUSE): + output_w_pen = tf.get_variable('output_w', [self.hps.dec_rnn_size, pen_n_out]) + output_b_pen = tf.get_variable('output_b', [pen_n_out], initializer=tf.constant_initializer(0.0)) + output_pen = tf.nn.xw_plus_b(rnn_output_flat, output_w_pen, output_b_pen) # (N, pen_n_out) + + with tf.variable_scope('DEC_RNN_out_params', reuse=tf.AUTO_REUSE): + output_w_params = tf.get_variable('output_w', [self.hps.dec_rnn_size, params_n_out]) + output_b_params = tf.get_variable('output_b', [params_n_out], initializer=tf.constant_initializer(0.0)) + output_params = tf.nn.xw_plus_b(rnn_output_flat, output_w_params, output_b_params) # (N, params_n_out) + + 
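+ # Note (added comment): two linear heads sit on the flattened RNN output
+ # (N, dec_rnn_size): a 2-way pen-state head and a 6-way stroke-parameter head;
+ # their concatenation (N, 8) is split back apart in get_mixture_coef.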
output = tf.concat([output_pen, output_params], axis=1) # (N, n_out) + + return output, last_state + + def get_mixture_coef(self, outputs): + z = outputs + z_pen_logits = z[:, 0:2] # (N, 2), pen states + z_other_params_logits = z[:, 2:] # (N, 6) + + z_pen = tf.nn.softmax(z_pen_logits) # (N, 2) + if self.hps.position_format == 'abs': + x1y1 = tf.nn.sigmoid(z_other_params_logits[:, 0:2]) # (N, 2) + x2y2 = tf.tanh(z_other_params_logits[:, 2:4]) # (N, 2) + widths = tf.nn.sigmoid(z_other_params_logits[:, 4:5]) # (N, 1) + widths = tf.add(tf.multiply(widths, 1.0 - self.hps.min_width), self.hps.min_width) + scaling = tf.nn.sigmoid(z_other_params_logits[:, 5:6]) * self.hps.max_scaling # (N, 1), [0.0, max_scaling] + # scaling = tf.add(tf.multiply(scaling, (self.hps.max_scaling - self.hps.min_scaling) / self.hps.max_scaling), + # self.hps.min_scaling) + z_other_params = tf.concat([x1y1, x2y2, widths, scaling], axis=-1) # (N, 6) + else: # "rel" + raise Exception('Unknown position_format', self.hps.position_format) + + r = [z_other_params, z_pen] + return r + + ########################### + + def get_decoder_inputs(self): + initial_state = self.dec_cell.zero_state(batch_size=self.hps.batch_size, dtype=tf.float32) + return initial_state + + def rnn_decoder(self, dec_cell, initial_state, actual_input_x): + with tf.variable_scope("RNN_DEC", reuse=tf.AUTO_REUSE): + output, last_state = tf.nn.dynamic_rnn( + dec_cell, + actual_input_x, + initial_state=initial_state, + time_major=False, + swap_memory=True, + dtype=tf.float32) + return output, last_state + + ########################### + + def image_padding(self, ori_image, window_size, pad_value): + """ + Pad with (bg) + :param ori_image: + :return: + """ + paddings = [[0, 0], + [window_size // 2, window_size // 2], + [window_size // 2, window_size // 2], + [0, 0]] + pad_img = tf.pad(ori_image, paddings=paddings, mode='CONSTANT', constant_values=pad_value) # (N, H_p, W_p, k) + return pad_img + + def image_cropping_fn(self, fn_inputs): + """ + crop the patch + :return: + """ + index_offset = self.hps.input_channel - 1 + input_image = fn_inputs[:, :, 0:2 + index_offset] # (image_size, image_size, 2), [0.0-BG, 1.0-stroke] + cursor_pos = fn_inputs[0, 0, 2 + index_offset:4 + index_offset] # (2), in [0.0, 1.0) + image_size = fn_inputs[0, 0, 4 + index_offset] # (), float32 + window_size = tf.cast(fn_inputs[0, 0, 5 + index_offset], tf.int32) # () + + input_img_reshape = tf.expand_dims(input_image, axis=0) + pad_img = self.image_padding(input_img_reshape, window_size, pad_value=0.0) + + cursor_pos = tf.cast(tf.round(tf.multiply(cursor_pos, image_size)), dtype=tf.int32) + x0, x1 = cursor_pos[0], cursor_pos[0] + window_size # () + y0, y1 = cursor_pos[1], cursor_pos[1] + window_size # () + patch_image = pad_img[:, y0:y1, x0:x1, :] # (1, window_size, window_size, 2/4) + + # resize to raster_size + patch_image_scaled = tf.image.resize_images(patch_image, (self.hps.raster_size, self.hps.raster_size), + method=tf.image.ResizeMethod.AREA) + patch_image_scaled = tf.squeeze(patch_image_scaled, axis=0) + # patch_canvas_scaled: (raster_size, raster_size, 2/4), [0.0-BG, 1.0-stroke] + + return patch_image_scaled + + def image_cropping(self, cursor_position, input_img, image_size, window_sizes): + """ + :param cursor_position: (N, 1, 2), float type, in size [0.0, 1.0) + :param input_img: (N, image_size, image_size, 2/4), [0.0-BG, 1.0-stroke] + :param window_sizes: (N, 1, 1), float32, with grad + """ + input_img_ = input_img + window_sizes_non_grad = 
tf.stop_gradient(tf.round(window_sizes)) # (N, 1, 1), no grad + + cursor_position_ = tf.reshape(cursor_position, (-1, 1, 1, 2)) # (N, 1, 1, 2) + cursor_position_ = tf.tile(cursor_position_, [1, image_size, image_size, 1]) # (N, image_size, image_size, 2) + + image_size_ = tf.reshape(tf.cast(image_size, tf.float32), (1, 1, 1, 1)) # (1, 1, 1, 1) + image_size_ = tf.tile(image_size_, [self.hps.batch_size // self.total_loop, image_size, image_size, 1]) + + window_sizes_ = tf.reshape(window_sizes_non_grad, (-1, 1, 1, 1)) # (N, 1, 1, 1) + window_sizes_ = tf.tile(window_sizes_, [1, image_size, image_size, 1]) # (N, image_size, image_size, 1) + + fn_inputs = tf.concat([input_img_, cursor_position_, image_size_, window_sizes_], + axis=-1) # (N, image_size, image_size, 2/4 + 4) + curr_patch_imgs = tf.map_fn(self.image_cropping_fn, fn_inputs, parallel_iterations=32) # (N, raster_size, raster_size, -) + return curr_patch_imgs + + def image_cropping_v3(self, cursor_position, input_img, image_size, window_sizes): + """ + :param cursor_position: (N, 1, 2), float type, in size [0.0, 1.0) + :param input_img: (N, image_size, image_size, k), [0.0-BG, 1.0-stroke] + :param window_sizes: (N, 1, 1), float32, with grad + """ + window_sizes_non_grad = tf.stop_gradient(window_sizes) # (N, 1, 1), no grad + + cursor_pos = tf.multiply(cursor_position, tf.cast(image_size, tf.float32)) + cursor_x, cursor_y = tf.split(cursor_pos, 2, axis=-1) # (N, 1, 1) + + y1 = cursor_y - (window_sizes_non_grad - 1.0) / 2 + x1 = cursor_x - (window_sizes_non_grad - 1.0) / 2 + y2 = y1 + (window_sizes_non_grad - 1.0) + x2 = x1 + (window_sizes_non_grad - 1.0) + boxes = tf.concat([y1, x1, y2, x2], axis=-1) # (N, 1, 4) + boxes = tf.squeeze(boxes, axis=1) # (N, 4) + boxes = boxes / tf.cast(image_size - 1, tf.float32) + + box_ind = tf.ones_like(cursor_x)[:, 0, 0] # (N) + box_ind = tf.cast(box_ind, dtype=tf.int32) + box_ind = tf.cumsum(box_ind) - 1 + + curr_patch_imgs = tf.image.crop_and_resize(input_img, boxes, box_ind, + crop_size=[self.hps.raster_size, self.hps.raster_size]) + # (N, raster_size, raster_size, k), [0.0-BG, 1.0-stroke] + return curr_patch_imgs + + def get_pixel_value(self, img, x, y): + """ + Utility function to get pixel value for coordinate vectors x and y from a 4D tensor image. 
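+        For example, output[b, i, j, :] = img[b, y[b, i, j], x[b, i, j], :]; this batched gather is
+        what image_pasting_diff_batch uses to fetch the four corner pixels for bilinear interpolation.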
+ + Input + ----- + - img: tensor of shape (B, H, W, C) + - x: flattened tensor of shape (B, H', W') + - y: flattened tensor of shape (B, H', W') + + Returns + ------- + - output: tensor of shape (B, H', W', C) + """ + shape = tf.shape(x) + batch_size = shape[0] + height = shape[1] + width = shape[2] + + batch_idx = tf.range(0, batch_size) + batch_idx = tf.reshape(batch_idx, (batch_size, 1, 1)) + b = tf.tile(batch_idx, (1, height, width)) + + indices = tf.stack([b, y, x], 3) + + return tf.gather_nd(img, indices) + + def image_pasting_nondiff_single(self, fn_inputs): + patch_image = fn_inputs[:, :, 0:1] # (raster_size, raster_size, 1), [0.0-BG, 1.0-stroke] + cursor_pos = fn_inputs[0, 0, 1:3] # (2), in large size + image_size = tf.cast(fn_inputs[0, 0, 3], tf.int32) # () + window_size = tf.cast(fn_inputs[0, 0, 4], tf.int32) # () + + patch_image_scaled = tf.expand_dims(patch_image, axis=0) # (1, raster_size, raster_size, 1) + patch_image_scaled = tf.image.resize_images(patch_image_scaled, (window_size, window_size), + method=tf.image.ResizeMethod.BILINEAR) + patch_image_scaled = tf.squeeze(patch_image_scaled, axis=0) + # patch_canvas_scaled: (window_size, window_size, 1) + + cursor_pos = tf.cast(tf.round(cursor_pos), dtype=tf.int32) # (2) + cursor_x, cursor_y = cursor_pos[0], cursor_pos[1] + + pad_up = cursor_y + pad_down = image_size - cursor_y + pad_left = cursor_x + pad_right = image_size - cursor_x + + paddings = [[pad_up, pad_down], + [pad_left, pad_right], + [0, 0]] + pad_img = tf.pad(patch_image_scaled, paddings=paddings, mode='CONSTANT', + constant_values=0.0) # (H_p, W_p, 1), [0.0-BG, 1.0-stroke] + + crop_start = window_size // 2 + pasted_image = pad_img[crop_start: crop_start + image_size, crop_start: crop_start + image_size, :] + return pasted_image + + def image_pasting_diff_single(self, fn_inputs): + patch_canvas = fn_inputs[:, :, 0:1] # (raster_size, raster_size, 1), [0.0-BG, 1.0-stroke] + cursor_pos = fn_inputs[0, 0, 1:3] # (2), in large size + image_size = tf.cast(fn_inputs[0, 0, 3], tf.int32) # () + window_size = tf.cast(fn_inputs[0, 0, 4], tf.int32) # () + cursor_x, cursor_y = cursor_pos[0], cursor_pos[1] + + patch_canvas_scaled = tf.expand_dims(patch_canvas, axis=0) # (1, raster_size, raster_size, 1) + patch_canvas_scaled = tf.image.resize_images(patch_canvas_scaled, (window_size, window_size), + method=tf.image.ResizeMethod.BILINEAR) + # patch_canvas_scaled: (1, window_size, window_size, 1) + + valid_canvas = self.image_pasting_diff_batch(patch_canvas_scaled, + tf.expand_dims(tf.expand_dims(cursor_pos, axis=0), axis=0), + window_size) + valid_canvas = tf.squeeze(valid_canvas, axis=0) + # (window_size + 1, window_size + 1, 1) + + pad_up = tf.cast(tf.floor(cursor_y), tf.int32) + pad_down = image_size - 1 - tf.cast(tf.floor(cursor_y), tf.int32) + pad_left = tf.cast(tf.floor(cursor_x), tf.int32) + pad_right = image_size - 1 - tf.cast(tf.floor(cursor_x), tf.int32) + + paddings = [[pad_up, pad_down], + [pad_left, pad_right], + [0, 0]] + pad_img = tf.pad(valid_canvas, paddings=paddings, mode='CONSTANT', + constant_values=0.0) # (H_p, W_p, 1), [0.0-BG, 1.0-stroke] + + crop_start = window_size // 2 + pasted_image = pad_img[crop_start: crop_start + image_size, crop_start: crop_start + image_size, :] + return pasted_image + + def image_pasting_diff_single_v3(self, fn_inputs): + patch_canvas = fn_inputs[:, :, 0:1] # (raster_size, raster_size, 1), [0.0-BG, 1.0-stroke] + cursor_pos_a = fn_inputs[0, 0, 1:3] # (2), float32, in large size + image_size_a = tf.cast(fn_inputs[0, 0, 3], 
tf.int32) # () + window_size_a = fn_inputs[0, 0, 4] # (), float32, with grad + raster_size_a = float(self.hps.raster_size) + + padding_size = tf.cast(tf.ceil(window_size_a / 2.0), tf.int32) + + x1y1_a = cursor_pos_a - window_size_a / 2.0 # (2), float32 + x2y2_a = cursor_pos_a + window_size_a / 2.0 # (2), float32 + + x1y1_a_floor = tf.floor(x1y1_a) # (2) + x2y2_a_ceil = tf.ceil(x2y2_a) # (2) + + cursor_pos_b_oricoord = (x1y1_a_floor + x2y2_a_ceil) / 2.0 # (2) + cursor_pos_b = (cursor_pos_b_oricoord - x1y1_a) / window_size_a * raster_size_a # (2) + raster_size_b = (x2y2_a_ceil - x1y1_a_floor) # (x, y) + image_size_b = raster_size_a + window_size_b = raster_size_a * (raster_size_b / window_size_a) # (x, y) + + cursor_b_x, cursor_b_y = tf.split(cursor_pos_b, 2, axis=-1) # (1) + + y1_b = cursor_b_y - (window_size_b[1] - 1.) / 2. + x1_b = cursor_b_x - (window_size_b[0] - 1.) / 2. + y2_b = y1_b + (window_size_b[1] - 1.) + x2_b = x1_b + (window_size_b[0] - 1.) + boxes_b = tf.concat([y1_b, x1_b, y2_b, x2_b], axis=-1) # (4) + boxes_b = boxes_b / tf.cast(image_size_b - 1, tf.float32) # with grad to window_size_a + + box_ind_b = tf.ones((1), dtype=tf.int32) # (1) + box_ind_b = tf.cumsum(box_ind_b) - 1 + + patch_canvas = tf.expand_dims(patch_canvas, axis=0) # (1, raster_size, raster_size, 1), [0.0-BG, 1.0-stroke] + boxes_b = tf.expand_dims(boxes_b, axis=0) # (1, 4) + + valid_canvas = tf.image.crop_and_resize(patch_canvas, boxes_b, box_ind_b, + crop_size=[raster_size_b[1], raster_size_b[0]]) + valid_canvas = valid_canvas[0] # (raster_size_b, raster_size_b, 1) + + pad_up = tf.cast(x1y1_a_floor[1], tf.int32) + padding_size + pad_down = image_size_a + padding_size - tf.cast(x2y2_a_ceil[1], tf.int32) + pad_left = tf.cast(x1y1_a_floor[0], tf.int32) + padding_size + pad_right = image_size_a + padding_size - tf.cast(x2y2_a_ceil[0], tf.int32) + + paddings = [[pad_up, pad_down], + [pad_left, pad_right], + [0, 0]] + pad_img = tf.pad(valid_canvas, paddings=paddings, mode='CONSTANT', + constant_values=0.0) # (H_p, W_p, 1), [0.0-BG, 1.0-stroke] + + pasted_image = pad_img[padding_size: padding_size + image_size_a, padding_size: padding_size + image_size_a, :] + return pasted_image + + def image_pasting_diff_batch(self, patch_image, cursor_position, window_size): + """ + :param patch_img: (N, window_size, window_size, 1), [0.0-BG, 1.0-stroke] + :param cursor_position: (N, 1, 2), in large size + :return: + """ + paddings1 = [[0, 0], + [1, 1], + [1, 1], + [0, 0]] + patch_image_pad1 = tf.pad(patch_image, paddings=paddings1, mode='CONSTANT', + constant_values=0.0) # (N, window_size+2, window_size+2, 1), [0.0-BG, 1.0-stroke] + + cursor_x, cursor_y = cursor_position[:, :, 0:1], cursor_position[:, :, 1:2] # (N, 1, 1) + cursor_x_f, cursor_y_f = tf.floor(cursor_x), tf.floor(cursor_y) + patch_x, patch_y = 1.0 - (cursor_x - cursor_x_f), 1.0 - (cursor_y - cursor_y_f) # (N, 1, 1) + + x_ones = tf.ones_like(patch_x, dtype=tf.float32) # (N, 1, 1) + x_ones = tf.tile(x_ones, [1, 1, window_size]) # (N, 1, window_size) + patch_x = tf.concat([patch_x, x_ones], axis=-1) # (N, 1, window_size + 1) + patch_x = tf.tile(patch_x, [1, window_size + 1, 1]) # (N, window_size + 1, window_size + 1) + patch_x = tf.cumsum(patch_x, axis=-1) # (N, window_size + 1, window_size + 1) + patch_x0 = tf.cast(tf.floor(patch_x), tf.int32) # (N, window_size + 1, window_size + 1) + patch_x1 = patch_x0 + 1 # (N, window_size + 1, window_size + 1) + + y_ones = tf.ones_like(patch_y, dtype=tf.float32) # (N, 1, 1) + y_ones = tf.tile(y_ones, [1, window_size, 1]) # (N, 
window_size, 1) + patch_y = tf.concat([patch_y, y_ones], axis=1) # (N, window_size + 1, 1) + patch_y = tf.tile(patch_y, [1, 1, window_size + 1]) # (N, window_size + 1, window_size + 1) + patch_y = tf.cumsum(patch_y, axis=1) # (N, window_size + 1, window_size + 1) + patch_y0 = tf.cast(tf.floor(patch_y), tf.int32) # (N, window_size + 1, window_size + 1) + patch_y1 = patch_y0 + 1 # (N, window_size + 1, window_size + 1) + + # get pixel value at corner coords + valid_canvas_patch_a = self.get_pixel_value(patch_image_pad1, patch_x0, patch_y0) + valid_canvas_patch_b = self.get_pixel_value(patch_image_pad1, patch_x0, patch_y1) + valid_canvas_patch_c = self.get_pixel_value(patch_image_pad1, patch_x1, patch_y0) + valid_canvas_patch_d = self.get_pixel_value(patch_image_pad1, patch_x1, patch_y1) + # (N, window_size + 1, window_size + 1, 1) + + patch_x0 = tf.cast(patch_x0, tf.float32) + patch_x1 = tf.cast(patch_x1, tf.float32) + patch_y0 = tf.cast(patch_y0, tf.float32) + patch_y1 = tf.cast(patch_y1, tf.float32) + + # calculate deltas + wa = (patch_x1 - patch_x) * (patch_y1 - patch_y) + wb = (patch_x1 - patch_x) * (patch_y - patch_y0) + wc = (patch_x - patch_x0) * (patch_y1 - patch_y) + wd = (patch_x - patch_x0) * (patch_y - patch_y0) + # (N, window_size + 1, window_size + 1) + + # add dimension for addition + wa = tf.expand_dims(wa, axis=3) + wb = tf.expand_dims(wb, axis=3) + wc = tf.expand_dims(wc, axis=3) + wd = tf.expand_dims(wd, axis=3) + # (N, window_size + 1, window_size + 1, 1) + + # compute output + valid_canvas_patch_ = tf.add_n([wa * valid_canvas_patch_a, + wb * valid_canvas_patch_b, + wc * valid_canvas_patch_c, + wd * valid_canvas_patch_d]) # (N, window_size + 1, window_size + 1, 1) + return valid_canvas_patch_ + + def image_pasting(self, cursor_position_norm, patch_img, image_size, window_sizes, is_differentiable=False): + """ + paste the patch_img to padded size based on cursor_position + :param cursor_position_norm: (N, 1, 2), float type, in size [0.0, 1.0) + :param patch_img: (N, raster_size, raster_size), [0.0-BG, 1.0-stroke] + :param window_sizes: (N, 1, 1), float32, with grad + :return: + """ + cursor_position = tf.multiply(cursor_position_norm, tf.cast(image_size, tf.float32)) # in large size + window_sizes_r = tf.round(window_sizes) # (N, 1, 1), no grad + + patch_img_ = tf.expand_dims(patch_img, axis=-1) # (N, raster_size, raster_size, 1) + cursor_position_step = tf.reshape(cursor_position, (-1, 1, 1, 2)) # (N, 1, 1, 2) + cursor_position_step = tf.tile(cursor_position_step, [1, self.hps.raster_size, self.hps.raster_size, + 1]) # (N, raster_size, raster_size, 2) + image_size_tile = tf.reshape(tf.cast(image_size, tf.float32), (1, 1, 1, 1)) # (N, 1, 1, 1) + image_size_tile = tf.tile(image_size_tile, [self.hps.batch_size // self.total_loop, self.hps.raster_size, + self.hps.raster_size, 1]) + window_sizes_tile = tf.reshape(window_sizes_r, (-1, 1, 1, 1)) # (N, 1, 1, 1) + window_sizes_tile = tf.tile(window_sizes_tile, [1, self.hps.raster_size, self.hps.raster_size, 1]) + + pasting_inputs = tf.concat([patch_img_, cursor_position_step, image_size_tile, window_sizes_tile], + axis=-1) # (N, raster_size, raster_size, 5) + + if is_differentiable: + curr_paste_imgs = tf.map_fn(self.image_pasting_diff_single, pasting_inputs, + parallel_iterations=32) # (N, image_size, image_size, 1) + else: + curr_paste_imgs = tf.map_fn(self.image_pasting_nondiff_single, pasting_inputs, + parallel_iterations=32) # (N, image_size, image_size, 1) + curr_paste_imgs = tf.squeeze(curr_paste_imgs, axis=-1) # (N, 
image_size, image_size) + return curr_paste_imgs + + def image_pasting_v3(self, cursor_position_norm, patch_img, image_size, window_sizes, is_differentiable=False): + """ + paste the patch_img to padded size based on cursor_position + :param cursor_position_norm: (N, 1, 2), float type, in size [0.0, 1.0) + :param patch_img: (N, raster_size, raster_size), [0.0-BG, 1.0-stroke] + :param window_sizes: (N, 1, 1), float32, with grad + :return: + """ + cursor_position = tf.multiply(cursor_position_norm, tf.cast(image_size, tf.float32)) # in large size + + if is_differentiable: + patch_img_ = tf.expand_dims(patch_img, axis=-1) # (N, raster_size, raster_size, 1) + cursor_position_step = tf.reshape(cursor_position, (-1, 1, 1, 2)) # (N, 1, 1, 2) + cursor_position_step = tf.tile(cursor_position_step, [1, self.hps.raster_size, self.hps.raster_size, + 1]) # (N, raster_size, raster_size, 2) + image_size_tile = tf.reshape(tf.cast(image_size, tf.float32), (1, 1, 1, 1)) # (N, 1, 1, 1) + image_size_tile = tf.tile(image_size_tile, [self.hps.batch_size // self.total_loop, self.hps.raster_size, + self.hps.raster_size, 1]) + window_sizes_tile = tf.reshape(window_sizes, (-1, 1, 1, 1)) # (N, 1, 1, 1) + window_sizes_tile = tf.tile(window_sizes_tile, [1, self.hps.raster_size, self.hps.raster_size, 1]) + + pasting_inputs = tf.concat([patch_img_, cursor_position_step, image_size_tile, window_sizes_tile], + axis=-1) # (N, raster_size, raster_size, 5) + curr_paste_imgs = tf.map_fn(self.image_pasting_diff_single_v3, pasting_inputs, + parallel_iterations=32) # (N, image_size, image_size, 1) + else: + raise Exception('Unfinished...') + curr_paste_imgs = tf.squeeze(curr_paste_imgs, axis=-1) # (N, image_size, image_size) + return curr_paste_imgs + + def get_points_and_raster_image(self, initial_state, init_cursor, input_photo, image_size): + ## generate the other_params and pen_ras and raster image for raster loss + prev_state = initial_state # (N, dec_rnn_size * 3) + + prev_width = self.init_width # (1) + prev_width = tf.expand_dims(tf.expand_dims(prev_width, axis=0), axis=0) # (1, 1, 1) + prev_width = tf.tile(prev_width, [self.hps.batch_size // self.total_loop, 1, 1]) # (N, 1, 1) + + prev_scaling = tf.ones((self.hps.batch_size // self.total_loop, 1, 1)) # (N, 1, 1) + prev_window_size = tf.ones((self.hps.batch_size // self.total_loop, 1, 1), + dtype=tf.float32) * float(self.hps.raster_size) # (N, 1, 1) + + cursor_position_temp = init_cursor + self.cursor_position = cursor_position_temp # (N, 1, 2), in size [0.0, 1.0) + cursor_position_loop = self.cursor_position + + other_params_list = [] + pen_ras_list = [] + + pos_before_max_min_list = [] + win_size_before_max_min_list = [] + + curr_canvas_soft = tf.zeros_like(input_photo[:, :, :, 0]) # (N, image_size, image_size), [0.0-BG, 1.0-stroke] + curr_canvas_soft_rgb = tf.tile(tf.zeros_like(input_photo[:, :, :, 0:1]), [1, 1, 1, 3]) # (N, image_size, image_size, 3), [0.0-BG, 1.0-stroke] + curr_canvas_hard = tf.zeros_like(curr_canvas_soft) # [0.0-BG, 1.0-stroke] + + #### sampling part - start #### + self.curr_canvas_hard = curr_canvas_hard + + rasterizor_st = NeuralRasterizorStep( + raster_size=self.hps.raster_size, + position_format=self.hps.position_format) + + if self.hps.cropping_type == 'v3': + cropping_func = self.image_cropping_v3 + # elif self.hps.cropping_type == 'v2': + # cropping_func = self.image_cropping + else: + raise Exception('Unknown cropping_type', self.hps.cropping_type) + + if self.hps.pasting_type == 'v3': + pasting_func = self.image_pasting_v3 + # elif 
self.hps.pasting_type == 'v2': + # pasting_func = self.image_pasting + else: + raise Exception('Unknown pasting_type', self.hps.pasting_type) + + for time_i in range(self.hps.max_seq_len): + cursor_position_non_grad = tf.stop_gradient(cursor_position_loop) # (N, 1, 2), in size [0.0, 1.0) + + curr_window_size = tf.multiply(prev_scaling, tf.stop_gradient(prev_window_size)) # float, with grad + curr_window_size = tf.maximum(curr_window_size, tf.cast(self.hps.min_window_size, tf.float32)) + curr_window_size = tf.minimum(curr_window_size, tf.cast(image_size, tf.float32)) + + ## patch-level encoding + # Here, we make the gradients from canvas_z to curr_canvas_hard be None to avoid recurrent gradient propagation. + curr_canvas_hard_non_grad = tf.stop_gradient(self.curr_canvas_hard) + curr_canvas_hard_non_grad = tf.expand_dims(curr_canvas_hard_non_grad, axis=-1) + + # input_photo: (N, image_size, image_size, 1/3), [0.0-stroke, 1.0-BG] + crop_inputs = tf.concat([1.0 - input_photo, curr_canvas_hard_non_grad], axis=-1) # (N, H_p, W_p, 1/3+1) + + cropped_outputs = cropping_func(cursor_position_non_grad, crop_inputs, image_size, curr_window_size) + index_offset = self.hps.input_channel - 1 + curr_patch_inputs = cropped_outputs[:, :, :, 0:1 + index_offset] # [0.0-BG, 1.0-stroke] + curr_patch_canvas_hard_non_grad = cropped_outputs[:, :, :, 1 + index_offset:2 + index_offset] + # (N, raster_size, raster_size, 1), [0.0-BG, 1.0-stroke] + + curr_patch_inputs = 1.0 - curr_patch_inputs # [0.0-stroke, 1.0-BG] + curr_patch_inputs = self.normalize_image_m1to1(curr_patch_inputs) + # (N, raster_size, raster_size, 1/3), [-1.0-stroke, 1.0-BG] + + # Normalizing image + curr_patch_canvas_hard_non_grad = 1.0 - curr_patch_canvas_hard_non_grad # [0.0-stroke, 1.0-BG] + curr_patch_canvas_hard_non_grad = self.normalize_image_m1to1(curr_patch_canvas_hard_non_grad) # [-1.0-stroke, 1.0-BG] + + ## image-level encoding + combined_z = self.build_combined_encoder( + curr_patch_canvas_hard_non_grad, + curr_patch_inputs, + 1.0 - curr_canvas_hard_non_grad, + input_photo, + cursor_position_non_grad, + image_size, + curr_window_size) # (N, z_size) + combined_z = tf.expand_dims(combined_z, axis=1) # (N, 1, z_size) + + curr_window_size_top_side_norm_non_grad = \ + tf.stop_gradient(curr_window_size / tf.cast(image_size, tf.float32)) + curr_window_size_bottom_side_norm_non_grad = \ + tf.stop_gradient(curr_window_size / tf.cast(self.hps.min_window_size, tf.float32)) + if not self.hps.concat_win_size: + combined_z = tf.concat([tf.stop_gradient(prev_width), combined_z], 2) # (N, 1, 2+z_size) + else: + combined_z = tf.concat([tf.stop_gradient(prev_width), + curr_window_size_top_side_norm_non_grad, + curr_window_size_bottom_side_norm_non_grad, + combined_z], + 2) # (N, 1, 2+z_size) + + if self.hps.concat_cursor: + prev_input_x = tf.concat([cursor_position_non_grad, combined_z], 2) # (N, 1, 2+2+z_size) + else: + prev_input_x = combined_z # (N, 1, 2+z_size) + + h_output, next_state = self.build_seq_decoder(self.dec_cell, prev_input_x, prev_state) + # h_output: (N * 1, n_out), next_state: (N, dec_rnn_size * 3) + [o_other_params, o_pen_ras] = self.get_mixture_coef(h_output) + # o_other_params: (N * 1, 6) + # o_pen_ras: (N * 1, 2), after softmax + + o_other_params = tf.reshape(o_other_params, [-1, 1, 6]) # (N, 1, 6) + o_pen_ras_raw = tf.reshape(o_pen_ras, [-1, 1, 2]) # (N, 1, 2) + + other_params_list.append(o_other_params) + pen_ras_list.append(o_pen_ras_raw) + + #### sampling part - end #### + + if self.hps.model_mode == 'train' or 
self.hps.model_mode == 'eval' or self.hps.model_mode == 'eval_sample': + # use renderer here to convert the strokes to image + curr_other_params = tf.squeeze(o_other_params, axis=1) # (N, 6), (x1, y1)=[0.0, 1.0], (x2, y2)=[-1.0, 1.0] + x1y1, x2y2, width2, scaling = curr_other_params[:, 0:2], curr_other_params[:, 2:4],\ + curr_other_params[:, 4:5], curr_other_params[:, 5:6] + x0y0 = tf.zeros_like(x2y2) # (N, 2), [-1.0, 1.0] + x0y0 = tf.div(tf.add(x0y0, 1.0), 2.0) # (N, 2), [0.0, 1.0] + x2y2 = tf.div(tf.add(x2y2, 1.0), 2.0) # (N, 2), [0.0, 1.0] + widths = tf.concat([tf.squeeze(prev_width, axis=1), width2], axis=1) # (N, 2) + curr_other_params = tf.concat([x0y0, x1y1, x2y2, widths], axis=-1) # (N, 8), (x0, y0)&(x2, y2)=[0.0, 1.0] + curr_stroke_image = rasterizor_st.raster_func_stroke_abs(curr_other_params) + # (N, raster_size, raster_size), [0.0-BG, 1.0-stroke] + + curr_stroke_image_large = pasting_func(cursor_position_loop, curr_stroke_image, + image_size, curr_window_size, + is_differentiable=self.hps.pasting_diff) + # (N, image_size, image_size), [0.0-BG, 1.0-stroke] + + ## soft + if not self.hps.use_softargmax: + curr_state_soft = o_pen_ras[:, 1:2] # (N, 1) + else: + curr_state_soft = self.differentiable_argmax(o_pen_ras, self.hps.soft_beta) # (N, 1) + + curr_state_soft = tf.expand_dims(curr_state_soft, axis=1) # (N, 1, 1) + + filter_curr_stroke_image_soft = tf.multiply(tf.subtract(1.0, curr_state_soft), curr_stroke_image_large) + # (N, image_size, image_size), [0.0-BG, 1.0-stroke] + curr_canvas_soft = tf.add(curr_canvas_soft, filter_curr_stroke_image_soft) # [0.0-BG, 1.0-stroke] + + ## hard + curr_state_hard = tf.expand_dims(tf.cast(tf.argmax(o_pen_ras_raw, axis=-1), dtype=tf.float32), + axis=-1) # (N, 1, 1) + filter_curr_stroke_image_hard = tf.multiply(tf.subtract(1.0, curr_state_hard), curr_stroke_image_large) + # (N, image_size, image_size), [0.0-BG, 1.0-stroke] + self.curr_canvas_hard = tf.add(self.curr_canvas_hard, filter_curr_stroke_image_hard) # [0.0-BG, 1.0-stroke] + self.curr_canvas_hard = tf.clip_by_value(self.curr_canvas_hard, 0.0, 1.0) # [0.0-BG, 1.0-stroke] + + next_width = o_other_params[:, :, 4:5] + next_scaling = o_other_params[:, :, 5:6] + next_window_size = tf.multiply(next_scaling, tf.stop_gradient(curr_window_size)) # float, with grad + window_size_before_max_min = next_window_size # (N, 1, 1), large-level + win_size_before_max_min_list.append(window_size_before_max_min) + next_window_size = tf.maximum(next_window_size, tf.cast(self.hps.min_window_size, tf.float32)) + next_window_size = tf.minimum(next_window_size, tf.cast(image_size, tf.float32)) + + prev_state = next_state + prev_width = next_width * curr_window_size / next_window_size # (N, 1, 1) + prev_scaling = next_scaling # (N, 1, 1)) + prev_window_size = curr_window_size + + # update the cursor position + new_cursor_offsets = tf.multiply(o_other_params[:, :, 2:4], + tf.divide(curr_window_size, 2.0)) # (N, 1, 2), window-level + new_cursor_offset_next = new_cursor_offsets + new_cursor_offset_next = tf.concat([new_cursor_offset_next[:, :, 1:2], new_cursor_offset_next[:, :, 0:1]], axis=-1) + + cursor_position_loop_large = tf.multiply(cursor_position_loop, tf.cast(image_size, tf.float32)) + + if self.hps.stop_accu_grad: + stroke_position_next = tf.stop_gradient(cursor_position_loop_large) + new_cursor_offset_next # (N, 1, 2), large-level + else: + stroke_position_next = cursor_position_loop_large + new_cursor_offset_next # (N, 1, 2), large-level + + stroke_position_before_max_min = stroke_position_next # (N, 1, 2), 
large-level + pos_before_max_min_list.append(stroke_position_before_max_min) + + if self.hps.cursor_type == 'next': + cursor_position_loop_large = stroke_position_next # (N, 1, 2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_position_loop_large = tf.maximum(cursor_position_loop_large, 0.0) + cursor_position_loop_large = tf.minimum(cursor_position_loop_large, tf.cast(image_size - 1, tf.float32)) + cursor_position_loop = tf.div(cursor_position_loop_large, tf.cast(image_size, tf.float32)) + + curr_canvas_soft = tf.clip_by_value(curr_canvas_soft, 0.0, 1.0) # (N, raster_size, raster_size), [0.0-BG, 1.0-stroke] + + other_params_ = tf.reshape(tf.concat(other_params_list, axis=1), [-1, 6]) # (N * max_seq_len, 6) + pen_ras_ = tf.reshape(tf.concat(pen_ras_list, axis=1), [-1, 2]) # (N * max_seq_len, 2) + pos_before_max_min_ = tf.concat(pos_before_max_min_list, axis=1) # (N, max_seq_len, 2) + win_size_before_max_min_ = tf.concat(win_size_before_max_min_list, axis=1) # (N, max_seq_len, 1) + + return other_params_, pen_ras_, prev_state, curr_canvas_soft, curr_canvas_soft_rgb, \ + pos_before_max_min_, win_size_before_max_min_ + + def differentiable_argmax(self, input_pen, soft_beta): + """ + Differentiable argmax trick. + :param input_pen: (N, n_class) + :return: pen_state: (N, 1) + """ + def sign_onehot(x): + """ + :param x: (N, n_class) + :return: (N, n_class) + """ + y = tf.sign(tf.reduce_max(x, axis=-1, keepdims=True) - x) + y = (y - 1) * (-1) + return y + + def softargmax(x, beta=1e2): + """ + :param x: (N, n_class) + :param beta: 1e10 is the best. 1e2 is acceptable. + :return: (N) + """ + x_range = tf.cumsum(tf.ones_like(x), axis=1) # (N, 2) + return tf.reduce_sum(tf.nn.softmax(x * beta) * x_range, axis=1) - 1 + + ## Better to use softargmax(beta=1e2). The sign_onehot's gradient is close to zero. 
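+        # A concrete check of the soft trick: with n_class = 2, x_range = [1, 2], so
+        # softargmax(x, beta) = sum(softmax(beta * x) * [1, 2]) - 1. For x = [0.1, 5.0]
+        # and beta = 1e2, softmax(beta * x) is ~[0, 1], giving ~1.0, i.e. the argmax
+        # index, while keeping a usable gradient w.r.t. x.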
+ # pen_onehot = sign_onehot(input_pen) # one-hot form, (N * max_seq_len, 2) + # pen_state = pen_onehot[:, 1:2] # (N * max_seq_len, 1) + pen_state = softargmax(input_pen, soft_beta) + pen_state = tf.expand_dims(pen_state, axis=1) # (N * max_seq_len, 1) + return pen_state + + def build_losses(self, target_sketch, pred_raster_imgs, pred_params, + pos_before_max_min, win_size_before_max_min, image_size): + def get_raster_loss(pred_imgs, gt_imgs, loss_type): + perc_layer_losses_raw = [] + perc_layer_losses_weighted = [] + perc_layer_losses_norm = [] + + if loss_type == 'l1': + ras_cost = tf.reduce_mean(tf.abs(tf.subtract(gt_imgs, pred_imgs))) # () + elif loss_type == 'l1_small': + gt_imgs_small = tf.image.resize_images(tf.expand_dims(gt_imgs, axis=3), (32, 32)) + pred_imgs_small = tf.image.resize_images(tf.expand_dims(pred_imgs, axis=3), (32, 32)) + ras_cost = tf.reduce_mean(tf.abs(tf.subtract(gt_imgs_small, pred_imgs_small))) # () + elif loss_type == 'mse': + ras_cost = tf.reduce_mean(tf.pow(tf.subtract(gt_imgs, pred_imgs), 2)) # () + elif loss_type == 'perceptual': + return_map_pred = vgg_net_slim(pred_imgs, image_size) + return_map_gt = vgg_net_slim(gt_imgs, image_size) + perc_loss_type = 'l1' # [l1, mse] + weighted_map = {'ReLU1_1': 100.0, 'ReLU1_2': 100.0, + 'ReLU2_1': 100.0, 'ReLU2_2': 100.0, + 'ReLU3_1': 10.0, 'ReLU3_2': 10.0, 'ReLU3_3': 10.0, + 'ReLU4_1': 1.0, 'ReLU4_2': 1.0, 'ReLU4_3': 1.0, + 'ReLU5_1': 1.0, 'ReLU5_2': 1.0, 'ReLU5_3': 1.0} + + for perc_layer in self.hps.perc_loss_layers: + if perc_loss_type == 'l1': + perc_layer_loss = tf.reduce_mean(tf.abs(tf.subtract(return_map_pred[perc_layer], + return_map_gt[perc_layer]))) # () + elif perc_loss_type == 'mse': + perc_layer_loss = tf.reduce_mean(tf.pow(tf.subtract(return_map_pred[perc_layer], + return_map_gt[perc_layer]), 2)) # () + else: + raise NameError('Unknown perceptual loss type:', perc_loss_type) + perc_layer_losses_raw.append(perc_layer_loss) + + assert perc_layer in weighted_map + perc_layer_losses_weighted.append(perc_layer_loss * weighted_map[perc_layer]) + + for loop_i in range(len(self.hps.perc_loss_layers)): + perc_relu_loss_raw = perc_layer_losses_raw[loop_i] # () + + if self.hps.model_mode == 'train': + curr_relu_mean = (self.perc_loss_mean_list[loop_i] * self.last_step_num + perc_relu_loss_raw) / (self.last_step_num + 1.0) + relu_cost_norm = perc_relu_loss_raw / curr_relu_mean + else: + relu_cost_norm = perc_relu_loss_raw + perc_layer_losses_norm.append(relu_cost_norm) + + perc_layer_losses_raw = tf.stack(perc_layer_losses_raw, axis=0) + perc_layer_losses_norm = tf.stack(perc_layer_losses_norm, axis=0) + + if self.hps.perc_loss_fuse_type == 'max': + ras_cost = tf.reduce_max(perc_layer_losses_norm) + elif self.hps.perc_loss_fuse_type == 'add': + ras_cost = tf.reduce_mean(perc_layer_losses_norm) + elif self.hps.perc_loss_fuse_type == 'raw_add': + ras_cost = tf.reduce_mean(perc_layer_losses_raw) + elif self.hps.perc_loss_fuse_type == 'weighted_sum': + ras_cost = tf.reduce_mean(perc_layer_losses_weighted) + else: + raise NameError('Unknown perc_loss_fuse_type:', self.hps.perc_loss_fuse_type) + + elif loss_type == 'triplet': + raise Exception('Solution for triplet loss is coming soon.') + else: + raise NameError('Unknown loss type:', loss_type) + + if loss_type != 'perceptual': + for perc_layer_i in self.hps.perc_loss_layers: + perc_layer_losses_raw.append(tf.constant(0.0)) + perc_layer_losses_norm.append(tf.constant(0.0)) + + perc_layer_losses_raw = tf.stack(perc_layer_losses_raw, axis=0) + perc_layer_losses_norm = 
tf.stack(perc_layer_losses_norm, axis=0)
+
+            return ras_cost, perc_layer_losses_raw, perc_layer_losses_norm
+
+        gt_raster_images = tf.squeeze(target_sketch, axis=3)  # (N, raster_h, raster_w), [0.0-stroke, 1.0-BG]
+        raster_cost, perc_relu_losses_raw, perc_relu_losses_norm = \
+            get_raster_loss(pred_raster_imgs, gt_raster_images, loss_type=self.hps.raster_loss_base_type)
+
+        def get_stroke_num_loss(input_strokes):
+            ending_state = input_strokes[:, :, 0]  # (N, seq_len)
+            stroke_num_loss_pre = tf.reduce_mean(ending_state)  # larger is better, [0.0, 1.0]
+            stroke_num_loss = 1.0 - stroke_num_loss_pre  # lower is better, [0.0, 1.0]
+            return stroke_num_loss
+
+        stroke_num_cost = get_stroke_num_loss(pred_params)  # lower is better
+
+        def get_pos_outside_loss(pos_before_max_min_):
+            pos_after_max_min = tf.maximum(pos_before_max_min_, 0.0)
+            pos_after_max_min = tf.minimum(pos_after_max_min, tf.cast(image_size - 1, tf.float32))  # (N, max_seq_len, 2)
+            pos_outside_loss = tf.reduce_mean(tf.abs(pos_before_max_min_ - pos_after_max_min))
+            return pos_outside_loss
+
+        pos_outside_cost = get_pos_outside_loss(pos_before_max_min)  # lower is better
+
+        def get_win_size_outside_loss(win_size_before_max_min_, min_window_size):
+            win_size_outside_top_loss = tf.divide(
+                tf.maximum(win_size_before_max_min_ - tf.cast(image_size, tf.float32), 0.0),
+                tf.cast(image_size, tf.float32))  # (N, max_seq_len, 1)
+            win_size_outside_bottom_loss = tf.divide(
+                tf.maximum(tf.cast(min_window_size, tf.float32) - win_size_before_max_min_, 0.0),
+                tf.cast(min_window_size, tf.float32))  # (N, max_seq_len, 1)
+            win_size_outside_loss = tf.reduce_mean(win_size_outside_top_loss + win_size_outside_bottom_loss)
+            return win_size_outside_loss
+
+        win_size_outside_cost = get_win_size_outside_loss(win_size_before_max_min, self.hps.min_window_size)  # lower is better
+
+        def get_early_pen_states_loss(input_strokes, curr_start, curr_end):
+            # input_strokes: (N, max_seq_len, 7)
+            pred_early_pen_states = input_strokes[:, curr_start:curr_end, 0]  # (N, curr_early_len)
+            pred_early_pen_states_min = tf.reduce_min(pred_early_pen_states, axis=1)  # (N), should not be 1
+            early_pen_states_loss = tf.reduce_mean(pred_early_pen_states_min)  # lower is better
+            return early_pen_states_loss
+
+        early_pen_states_cost = get_early_pen_states_loss(pred_params,
+                                                          self.early_pen_loss_start_idx, self.early_pen_loss_end_idx)
+
+        return raster_cost, stroke_num_cost, pos_outside_cost, win_size_outside_cost, \
+            early_pen_states_cost, \
+            perc_relu_losses_raw, perc_relu_losses_norm
+
+    def build_training_op_split(self, raster_cost, sn_cost, cursor_outside_cost, win_size_outside_cost,
+                                early_pen_states_cost):
+        total_cost = self.hps.raster_loss_weight * raster_cost + \
+                     self.hps.early_pen_loss_weight * early_pen_states_cost + \
+                     self.stroke_num_loss_weight * sn_cost + \
+                     self.hps.outside_loss_weight * cursor_outside_cost + \
+                     self.hps.win_size_outside_loss_weight * win_size_outside_cost
+
+        tvars = [var for var in tf.trainable_variables()
+                 if 'raster_unit' not in var.op.name and 'VGG16' not in var.op.name]
+        gvs = self.optimizer.compute_gradients(total_cost, var_list=tvars)
+        return total_cost, gvs
+
+    def build_training_op(self, grad_list):
+        with tf.variable_scope('train_op', reuse=tf.AUTO_REUSE):
+            gvs = self.average_gradients(grad_list)
+            g = self.hps.grad_clip
+
+            for grad, var in gvs:
+                print('>>', var.op.name)
+                if grad is None:
+                    print('  >> None value')
+
+            # tf.clip_by_value(None, ...) raises at graph-construction time, so keep
+            # (None, var) pairs untouched; apply_gradients skips variables without gradients.
+            capped_gvs = [(grad, var) if grad is None else (tf.clip_by_value(grad, -g, g), var)
+                          for grad, var in gvs]
+
+            self.train_op = self.optimizer.apply_gradients(
+                capped_gvs, global_step=self.global_step, name='train_step')
+
+    def average_gradients(self, grads_list):
+        """
+        Compute the average gradients.
+        :param grads_list: list (of length N_GPU) of lists of (grad, var) pairs
+        :return:
+        """
+        avg_grads = []
+        for grad_and_vars in zip(*grads_list):
+            grads = []
+            for g, _ in grad_and_vars:
+                expanded_g = tf.expand_dims(g, 0)
+                grads.append(expanded_g)
+            grad = tf.concat(grads, axis=0)
+            grad = tf.reduce_mean(grad, axis=0)
+
+            v = grad_and_vars[0][1]
+            grad_and_var = (grad, v)
+            avg_grads.append(grad_and_var)
+
+        return avg_grads
\ No newline at end of file
diff --git a/robot_painting/qmupd_vs/models/__init__.py b/robot_painting/qmupd_vs/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc01113da66ff042bd1807b5bfdb70c4bce8d14c
--- /dev/null
+++ b/robot_painting/qmupd_vs/models/__init__.py
@@ -0,0 +1,67 @@
+"""This package contains modules related to objective functions, optimizations, and network architectures.
+
+To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
+You need to implement the following five functions:
+    -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+    -- <set_input>: unpack data from dataset and apply preprocessing.
+    -- <forward>: produce intermediate results.
+    -- <optimize_parameters>: calculate loss, gradients, and update network weights.
+    -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+
+In the function <__init__>, you need to define four lists:
+    -- self.loss_names (str list): specify the training losses that you want to plot and save.
+    -- self.model_names (str list): define networks used in our training.
+    -- self.visual_names (str list): specify the images that you want to display and save.
+    -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+
+Now you can use the model class by specifying flag '--model dummy'.
+See our template model class 'template_model.py' for more details.
+"""
+
+import importlib
+from models.base_model import BaseModel
+
+
+def find_model_using_name(model_name):
+    """Import the module "models/[model_name]_model.py".
+
+    In the file, the class called DatasetNameModel() will
+    be instantiated. It has to be a subclass of BaseModel,
+    and it is case-insensitive.
+    """
+    model_filename = "models." + model_name + "_model"
+    modellib = importlib.import_module(model_filename)
+    model = None
+    target_model_name = model_name.replace('_', '') + 'model'
+    for name, cls in modellib.__dict__.items():
+        if name.lower() == target_model_name.lower() \
+           and issubclass(cls, BaseModel):
+            model = cls
+
+    if model is None:
+        print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
+        exit(0)
+
+    return model
+
+
+def get_option_setter(model_name):
+    """Return the static method <modify_commandline_options> of the model class."""
+    model_class = find_model_using_name(model_name)
+    return model_class.modify_commandline_options
+
+
+def create_model(opt):
+    """Create a model given the option.
+
+    This function wraps the model class.
+    This is the main interface between this package and 'train.py'/'test.py'
+
+    Example:
+        >>> from models import create_model
+        >>> model = create_model(opt)
+    """
+    model = find_model_using_name(opt.model)
+    instance = model(opt)
+    print("model [%s] was created" % type(instance).__name__)
+    return instance
diff --git a/robot_painting/qmupd_vs/models/base_model.py b/robot_painting/qmupd_vs/models/base_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..d06337d4ee138db99a94032b40fe6ad9c8627f4b
--- /dev/null
+++ b/robot_painting/qmupd_vs/models/base_model.py
@@ -0,0 +1,248 @@
+import os
+import torch
+from collections import OrderedDict
+from abc import ABCMeta, abstractmethod
+from . import networks
+import pdb
+
+
+class BaseModel():
+    """This class is an abstract base class (ABC) for models.
+    To create a subclass, you need to implement the following five functions:
+        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+        -- <set_input>: unpack data from dataset and apply preprocessing.
+        -- <forward>: produce intermediate results.
+        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
+        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+    """
+    __metaclass__ = ABCMeta
+
+    def __init__(self, opt):
+        """Initialize the BaseModel class.
+
+        Parameters:
+            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
+
+        When creating your custom class, you need to implement your own initialization.
+        In this function, you should first call <BaseModel.__init__(self, opt)>.
+        Then, you need to define four lists:
+            -- self.loss_names (str list): specify the training losses that you want to plot and save.
+            -- self.model_names (str list): define networks used in our training.
+            -- self.visual_names (str list): specify the images that you want to display and save.
+            -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+        """
+        self.opt = opt
+        self.gpu_ids = opt.gpu_ids
+        self.isTrain = opt.isTrain
+        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU
+        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
+        if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
+            torch.backends.cudnn.benchmark = True
+        self.loss_names = []
+        self.model_names = []
+        self.visual_names = []
+        self.optimizers = []
+        self.image_paths = []
+        self.metric = 0  # used for learning rate policy 'plateau'
+
+    @staticmethod
+    def modify_commandline_options(parser, is_train):
+        """Add new model-specific options, and rewrite default values for existing options.
+
+        Parameters:
+            parser          -- original option parser
+            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+        """
+        return parser
+
+    @abstractmethod
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+        Parameters:
+            input (dict): includes the data itself and its metadata information.
+ """ + pass + + @abstractmethod + def forward(self): + """Run forward pass; called by both functions and .""" + pass + + @abstractmethod + def optimize_parameters(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + pass + + def setup(self, opt): + """Load and print networks; create schedulers + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + if self.isTrain: + self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] + if not self.isTrain or opt.continue_train: + load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch + self.load_networks(load_suffix) + self.print_networks(opt.verbose) + + def eval(self): + """Make models eval mode during test time""" + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, 'net' + name) + net.eval() + + def test(self): + """Forward function used in test time. + + This function wraps function in no_grad() so we don't save intermediate steps for backprop + It also calls to produce additional visualization results + """ + with torch.no_grad(): + self.forward() + self.compute_visuals() + + def compute_visuals(self): + """Calculate additional output images for visdom and HTML visualization""" + pass + + def get_image_paths(self): + """ Return image paths that are used to load current data""" + return self.image_paths + + def update_learning_rate(self): + """Update learning rates for all the networks; called at the end of every epoch""" + for scheduler in self.schedulers: + if self.opt.lr_policy == 'plateau': + scheduler.step(self.metric) + else: + scheduler.step() + + lr = self.optimizers[0].param_groups[0]['lr'] + print('learning rate = %.7f' % lr) + + def get_current_visuals(self): + """Return visualization images. train.py will display these images with visdom, and save the images to a HTML""" + visual_ret = OrderedDict() + for name in self.visual_names: + if isinstance(name, str): + visual_ret[name] = getattr(self, name) + return visual_ret + + def get_current_losses(self): + """Return traning losses / errors. train.py will print out these errors on console, and save them to a file""" + errors_ret = OrderedDict() + for name in self.loss_names: + if isinstance(name, str): + errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number + return errors_ret + + def save_networks(self, epoch): + """Save all the networks to the disk. 
+
+        Parameters:
+            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+        """
+        for name in self.model_names:
+            if isinstance(name, str):
+                save_filename = '%s_net_%s.pth' % (epoch, name)
+                save_path = os.path.join(self.save_dir, save_filename)
+                net = getattr(self, 'net' + name)
+
+                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
+                    torch.save(net.module.cpu().state_dict(), save_path)
+                    net.cuda(self.gpu_ids[0])
+                else:
+                    torch.save(net.cpu().state_dict(), save_path)
+
+    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
+        """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
+        key = keys[i]
+        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
+            if module.__class__.__name__.startswith('InstanceNorm') and \
+                    (key == 'running_mean' or key == 'running_var'):
+                if getattr(module, key) is None:
+                    state_dict.pop('.'.join(keys))
+            if module.__class__.__name__.startswith('InstanceNorm') and \
+                    (key == 'num_batches_tracked'):
+                state_dict.pop('.'.join(keys))
+        else:
+            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
+
+    def load_networks(self, epoch):
+        """Load all the networks from the disk.
+
+        Parameters:
+            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+        """
+        for name in self.model_names:
+            if isinstance(name, str):
+                load_filename = '%s_net_%s.pth' % (epoch, name)
+                load_path = os.path.join(self.save_dir, load_filename)
+                net = getattr(self, 'net' + name)
+                if isinstance(net, torch.nn.DataParallel):
+                    net = net.module
+                print('loading the model from %s' % load_path)
+                # if you are using PyTorch newer than 0.4 (e.g., built from
+                # GitHub source), you can remove str() on self.device
+                state_dict = torch.load(load_path, map_location=str(self.device))
+                if hasattr(state_dict, '_metadata'):
+                    del state_dict._metadata
+
+                # patch InstanceNorm checkpoints prior to 0.4
+                for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop
+                    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
+                net.load_state_dict(state_dict)
+                #param1 = {}
+                #for name, parameters in net.named_parameters():
+                #    print(name,',',parameters.size())
+                #    param1[name] = parameters.detach().cpu().numpy()
+                #pdb.set_trace()
+
+    def print_networks(self, verbose):
+        """Print the total number of parameters in the network and (if verbose) network architecture
+
+        Parameters:
+            verbose (bool) -- if verbose: print the network architecture
+        """
+        print('---------- Networks initialized -------------')
+        for name in self.model_names:
+            if isinstance(name, str):
+                net = getattr(self, 'net' + name)
+                num_params = 0
+                for param in net.parameters():
+                    num_params += param.numel()
+                if verbose:
+                    print(net)
+                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
+        print('-----------------------------------------------')
+
+    def set_requires_grad(self, nets, requires_grad=False):
+        """Set requires_grad=False for all the networks to avoid unnecessary computations
+        Parameters:
+            nets (network list)   -- a list of networks
+            requires_grad (bool)  -- whether the networks require gradients or not
+        """
+        if not isinstance(nets, list):
+            nets = [nets]
+        for net in nets:
+            if net is not None:
+                for param in net.parameters():
+                    param.requires_grad = requires_grad
+
+    # ===========================================================================================================
+    def masked(self, A, mask):
+        if
self.opt.mask_type == 0: + return (A/2+0.5)*mask*2-1 + elif self.opt.mask_type == 1: + return ((A/2+0.5)*mask+1-mask)*2-1 + elif self.opt.mask_type == 2: + return torch.cat((A, mask), 1) + elif self.opt.mask_type == 3: + masked = ((A/2+0.5)*mask+1-mask)*2-1 + return torch.cat((masked, mask), 1) \ No newline at end of file diff --git a/robot_painting/qmupd_vs/models/cycle_gan_cls_model.py b/robot_painting/qmupd_vs/models/cycle_gan_cls_model.py new file mode 100644 index 0000000000000000000000000000000000000000..8883fcec78f150470728571ae2c1c6f9fbbd0346 --- /dev/null +++ b/robot_painting/qmupd_vs/models/cycle_gan_cls_model.py @@ -0,0 +1,565 @@ +import torch +import itertools +from util.image_pool import ImagePool +from .base_model import BaseModel +from . import networks +import models.dist_model as dm # numpy==1.14.3 +import torchvision.transforms as transforms +import os +from util.util import tensor2im, tensor2im2, save_image + +def truncate(fake_B,a=127.5):#[-1,1] + #return torch.round((fake_B+1)*a)/a-1 + return ((fake_B+1)*a).int().float()/a-1 + +class CycleGANClsModel(BaseModel): + """ + This class implements the CycleGAN model, for learning image-to-image translation without paired data. + + The model training requires '--dataset_mode unaligned' dataset. + By default, it uses a '--netG resnet_9blocks' ResNet generator, + a '--netD basic' discriminator (PatchGAN introduced by pix2pix), + and a least-square GANs objective ('--gan_mode lsgan'). + + CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf + """ + @staticmethod + def modify_commandline_options(parser, is_train=True): + """Add new dataset-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + + For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses. + A (source domain), B (target domain). + Generators: G_A: A -> B; G_B: B -> A. + Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A. + Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper) + Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper) + Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper) + Dropout is not used in the original CycleGAN paper. + """ + parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout + parser.set_defaults(dataset_mode='unaligned_mask_stylecls') + parser.add_argument('--netda', type=str, default='basic_cls') # discriminator has two branches + parser.add_argument('--truncate', type=float, default=0.0, help='whether truncate in forward') + if is_train: + parser.add_argument('--lambda_A', type=float, default=5.0, help='weight for cycle loss (A -> B -> A)') + parser.add_argument('--lambda_B', type=float, default=5.0, help='weight for cycle loss (B -> A -> B)') + parser.add_argument('--lambda_identity', type=float, default=0, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. 
For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1') + parser.add_argument('--perceptual_cycle', type=int, default=6, help='whether use perceptual similarity for cycle loss') + parser.add_argument('--use_hed', type=int, default=1, help='whether use hed processing for cycle loss') + parser.add_argument('--ntrunc_trunc', type=int, default=1, help='whether use both non-trunc version and trunc version') + parser.add_argument('--trunc_a', type=float, default=31.875, help='multiply which value to round when trunc') + parser.add_argument('--lambda_A_trunc', type=float, default=5.0, help='weight for cycle loss for trunc') + parser.add_argument('--hed_pretrained_mode', type=str, default='./checkpoints/network-bsds500.pytorch', help='path to the pretrained hed model') + parser.add_argument('--vgg_pretrained_mode', type=str, default='./checkpoints/vgg19.pth', help='path to the pretrained vgg model') + parser.add_argument('--lambda_G_A_l', type=float, default=0.5, help='weight for local GAN loss in G') + parser.add_argument('--style_loss_with_weight', type=int, default=0, help='whether multiply prob in style loss') + parser.add_argument('--metric', action='store_true', help='whether use metric loss for fakeB') + parser.add_argument('--metric_model_path', type=str, default='3/30_net_Regressor.pth', help='metric model path') + parser.add_argument('--lambda_metric', type=float, default=0.5, help='weight for metric loss') + parser.add_argument('--metricvec', action='store_true', help='whether use metric model with vec input') + parser.add_argument('--metric_resnext', action='store_true', help='whether use resnext as metric model') + parser.add_argument('--metric_resnet', action='store_true', help='whether use resnet as metric model') + parser.add_argument('--metric_inception', action='store_true', help='whether use inception as metric model')# the inception of transform_input=False + parser.add_argument('--metric_inmask', action='store_true', help='whether use inmask in metric model') + else: + parser.add_argument('--check_D', action='store_true', help='whether use check Ds outputs') + # for masks + parser.add_argument('--use_mask', type=int, default=1, help='whether use mask for special face region') + parser.add_argument('--use_eye_mask', type=int, default=1, help='whether use mask for special face region') + parser.add_argument('--use_lip_mask', type=int, default=1, help='whether use mask for special face region') + parser.add_argument('--mask_type', type=int, default=3, help='use mask type, 0 outside black, 1 outside white') + # for style control + parser.add_argument('--style_control', type=int, default=1, help='use style_control') + parser.add_argument('--sfeature_mode', type=str, default='1vgg19_softmax', help='vgg19 softmax as feature') + parser.add_argument('--netga', type=str, default='resnet_style_9blocks', help='net arch for netG_A') + parser.add_argument('--model0_res', type=int, default=0, help='number of resblocks in model0 (before insert style)') + parser.add_argument('--model1_res', type=int, default=0, help='number of resblocks in model1 (after insert style, before 2 column merge)') + parser.add_argument('--one_hot', type=int, default=0, help='use one-hot for style code') + + return parser + + def __init__(self, opt): + """Initialize the CycleGAN class. 
+
+        Parameters:
+            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        BaseModel.__init__(self, opt)
+        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
+        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
+        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
+        visual_names_A = ['real_A', 'fake_B', 'rec_A']
+        visual_names_B = ['real_B', 'fake_A', 'rec_B']
+        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
+            visual_names_A.append('idt_B')
+            visual_names_B.append('idt_A')
+        if self.isTrain and self.opt.use_hed:
+            visual_names_A.append('real_A_hed')
+            visual_names_A.append('rec_A_hed')
+        if self.isTrain and self.opt.ntrunc_trunc:
+            visual_names_A.append('rec_At')
+            if self.opt.use_hed:
+                visual_names_A.append('rec_At_hed')
+            self.loss_names = ['D_A', 'G_A', 'cycle_A', 'cycle_A2', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B', 'G']
+        if self.isTrain and self.opt.use_mask:
+            visual_names_A.append('fake_B_l')
+            visual_names_A.append('real_B_l')
+            self.loss_names += ['D_A_l', 'G_A_l']
+        if self.isTrain and self.opt.use_eye_mask:
+            visual_names_A.append('fake_B_le')
+            visual_names_A.append('real_B_le')
+            self.loss_names += ['D_A_le', 'G_A_le']
+        if self.isTrain and self.opt.use_lip_mask:
+            visual_names_A.append('fake_B_ll')
+            visual_names_A.append('real_B_ll')
+            self.loss_names += ['D_A_ll', 'G_A_ll']
+        if self.isTrain and self.opt.metric:
+            self.loss_names += ['metric']
+            #visual_names_B += ['fake_B2']
+        if not self.isTrain and self.opt.use_mask:
+            visual_names_A.append('fake_B_l')
+            visual_names_A.append('real_B_l')
+        if not self.isTrain and self.opt.use_eye_mask:
+            visual_names_A.append('fake_B_le')
+            visual_names_A.append('real_B_le')
+        if not self.isTrain and self.opt.use_lip_mask:
+            visual_names_A.append('fake_B_ll')
+            visual_names_A.append('real_B_ll')
+        self.loss_names += ['D_A_cls', 'G_A_cls']
+
+        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
+        print(self.visual_names)
+        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
+        if self.isTrain:
+            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
+            if self.opt.use_mask:
+                self.model_names += ['D_A_l']
+            if self.opt.use_eye_mask:
+                self.model_names += ['D_A_le']
+            if self.opt.use_lip_mask:
+                self.model_names += ['D_A_ll']
+        else:  # during test time, only load Gs
+            self.model_names = ['G_A', 'G_B']
+            if self.opt.check_D:
+                self.model_names += ['D_A', 'D_B']
+
+        # define networks (both Generators and discriminators)
+        # The naming is different from those used in the paper.
+        # Code (vs.
paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X) + if not self.opt.style_control: + self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, + not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) + else: + print(opt.netga) + print('model0_res', opt.model0_res) + print('model1_res', opt.model1_res) + self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netga, opt.norm, + not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, opt.model0_res, opt.model1_res) + self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm, + not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) + + #if self.isTrain: # define discriminators + if self.isTrain or self.opt.check_D: # define discriminators + self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netda, + opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids, n_class=3) + self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, + opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) + if self.opt.use_mask: + if self.opt.mask_type in [2, 3]: + output_nc = opt.output_nc + 1 + else: + output_nc = opt.output_nc + self.netD_A_l = networks.define_D(output_nc, opt.ndf, opt.netD, + opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) + if self.opt.use_eye_mask: + if self.opt.mask_type in [2, 3]: + output_nc = opt.output_nc + 1 + else: + output_nc = opt.output_nc + self.netD_A_le = networks.define_D(output_nc, opt.ndf, opt.netD, + opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) + if self.opt.use_lip_mask: + if self.opt.mask_type in [2, 3]: + output_nc = opt.output_nc + 1 + else: + output_nc = opt.output_nc + self.netD_A_ll = networks.define_D(output_nc, opt.ndf, opt.netD, + opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) + + if self.isTrain and self.opt.metric: + if not opt.metric_resnext and not opt.metric_resnet and not opt.metric_inception: + self.metric = networks.define_inception_v3a(init_weights_='./checkpoints/metric/'+self.opt.metric_model_path,gpu_ids_ = self.gpu_ids,vec=self.opt.metricvec) + elif opt.metric_resnext: + self.metric = networks.define_resnext101a(init_weights_='./checkpoints/metric/'+self.opt.metric_model_path,gpu_ids_ = self.gpu_ids,vec=self.opt.metricvec) + elif opt.metric_resnet: + self.metric = networks.define_resnet101a(init_weights_='./checkpoints/metric/'+self.opt.metric_model_path,gpu_ids_ = self.gpu_ids,vec=self.opt.metricvec) + elif opt.metric_inception: + self.metric = networks.define_inception3a(init_weights_='./checkpoints/metric/'+self.opt.metric_model_path,gpu_ids_ = self.gpu_ids,vec=self.opt.metricvec) + self.metric.eval() + self.set_requires_grad(self.metric, False) + + if not self.isTrain and self.opt.check_D: + self.criterionGAN = networks.GANLoss('lsgan').to(self.device) + + if self.isTrain: + if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels + assert(opt.input_nc == opt.output_nc) + self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images + self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images + # define loss functions + self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss. 
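+            # Note (added): GANLoss (defined in networks.py below) maps gan_mode to a
+            # criterion: 'lsgan' -> nn.MSELoss(), 'vanilla' -> nn.BCEWithLogitsLoss(),
+            # so the discriminators output raw, non-sigmoid scores in either case.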
+            self.criterionCycle = torch.nn.L1Loss()
+            self.criterionIdt = torch.nn.L1Loss()
+            self.criterionCls = torch.nn.CrossEntropyLoss()
+            self.criterionCls2 = torch.nn.CrossEntropyLoss(reduction='none')
+            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
+            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
+            if not self.opt.use_mask:
+                self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
+            elif not self.opt.use_eye_mask:
+                D_params = list(self.netD_A.parameters()) + list(self.netD_B.parameters()) + list(self.netD_A_l.parameters())
+                self.optimizer_D = torch.optim.Adam(D_params, lr=opt.lr, betas=(opt.beta1, 0.999))
+            elif not self.opt.use_lip_mask:
+                D_params = list(self.netD_A.parameters()) + list(self.netD_B.parameters()) + list(self.netD_A_l.parameters()) + list(self.netD_A_le.parameters())
+                self.optimizer_D = torch.optim.Adam(D_params, lr=opt.lr, betas=(opt.beta1, 0.999))
+            else:
+                D_params = list(self.netD_A.parameters()) + list(self.netD_B.parameters()) + list(self.netD_A_l.parameters()) + list(self.netD_A_le.parameters()) + list(self.netD_A_ll.parameters())
+                self.optimizer_D = torch.optim.Adam(D_params, lr=opt.lr, betas=(opt.beta1, 0.999))
+            self.optimizers.append(self.optimizer_G)
+            self.optimizers.append(self.optimizer_D)
+
+            if self.opt.perceptual_cycle:
+                if self.opt.perceptual_cycle in [1,2,3,6]:
+                    self.lpips = dm.DistModel(opt,model='net-lin',net='alex',use_gpu=True)
+                elif self.opt.perceptual_cycle in [4,5,8]:
+                    self.vgg = networks.define_VGG(init_weights_=opt.vgg_pretrained_mode, feature_mode_=True, gpu_ids_=self.gpu_ids)  # using conv4_4 layer
+
+            if self.opt.use_hed:
+                #self.hed = networks.define_HED(init_weights_=opt.hed_pretrained_mode, gpu_ids_=self.gpu_ids)
+                self.hed = networks.define_HED(init_weights_=opt.hed_pretrained_mode, gpu_ids_=self.opt.gpu_ids_p)
+                self.set_requires_grad(self.hed, False)
+
+
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+        Parameters:
+            input (dict): include the data itself and its metadata information.
+
+        The option 'direction' can be used to swap domain A and domain B.
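+
+        Besides the standard 'A'/'B' images and image paths, this implementation
+        also reads optional entries when the corresponding flags are set: region
+        masks ('A_mask'/'B_mask' and their eye/lip variants), style inputs
+        ('B_style'/'B_label', plus 'B_style0' for the weighted style loss), and
+        metric-related inputs ('vec', 'A_maskfg').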
+ """ + AtoB = self.opt.direction == 'AtoB' + self.real_A = input['A' if AtoB else 'B'].to(self.device) + self.real_B = input['B' if AtoB else 'A'].to(self.device) + self.image_paths = input['A_paths' if AtoB else 'B_paths'] + if self.opt.use_mask: + self.A_mask = input['A_mask'].to(self.device) + self.B_mask = input['B_mask'].to(self.device) + if self.opt.use_eye_mask: + self.A_maske = input['A_maske'].to(self.device) + self.B_maske = input['B_maske'].to(self.device) + if self.opt.use_lip_mask: + self.A_maskl = input['A_maskl'].to(self.device) + self.B_maskl = input['B_maskl'].to(self.device) + if self.opt.style_control: + self.real_B_style = input['B_style'].to(self.device) + self.real_B_label = input['B_label'].to(self.device) + if self.opt.isTrain and self.opt.style_loss_with_weight: + self.real_B_style0 = input['B_style0'].to(self.device) + self.zero = torch.zeros(self.real_B_label.size(),dtype=torch.int64).to(self.device) + self.one = torch.ones(self.real_B_label.size(),dtype=torch.int64).to(self.device) + self.two = 2*torch.ones(self.real_B_label.size(),dtype=torch.int64).to(self.device) + if self.opt.isTrain and self.opt.metricvec: + self.vec = input['vec'].to(self.device) + if self.opt.isTrain and self.opt.metric_inmask: + self.A_maskfg = input['A_maskfg'].to(self.device) + + def forward(self): + """Run forward pass; called by both functions and .""" + if not self.opt.style_control: + self.fake_B = self.netG_A(self.real_A) # G_A(A) + else: + #print(torch.mean(self.real_B_style,(2,3)),'style_control') + #print(self.real_B_style,'style_control') + self.fake_B = self.netG_A(self.real_A, self.real_B_style) + self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A)) + self.fake_A = self.netG_B(self.real_B) # G_B(B) + if not self.opt.style_control: + self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B)) + else: + #print(torch.mean(self.real_B_style,(2,3)),'style_control') + self.rec_B = self.netG_A(self.fake_A, self.real_B_style) # -- cycle_B loss + + if self.opt.use_mask: + self.fake_B_l = self.masked(self.fake_B,self.A_mask) + self.real_B_l = self.masked(self.real_B,self.B_mask) + if self.opt.use_eye_mask: + self.fake_B_le = self.masked(self.fake_B,self.A_maske) + self.real_B_le = self.masked(self.real_B,self.B_maske) + if self.opt.use_lip_mask: + self.fake_B_ll = self.masked(self.fake_B,self.A_maskl) + self.real_B_ll = self.masked(self.real_B,self.B_maskl) + + def backward_D_basic(self, netD, real, fake): + """Calculate GAN loss for the discriminator + + Parameters: + netD (network) -- the discriminator D + real (tensor array) -- real images + fake (tensor array) -- images generated by a generator + + Return the discriminator loss. + We also call loss_D.backward() to calculate the gradients. 
+ """ + # Real + pred_real = netD(real) + loss_D_real = self.criterionGAN(pred_real, True) + # Fake + pred_fake = netD(fake.detach()) + loss_D_fake = self.criterionGAN(pred_fake, False) + # Combined loss and calculate gradients + loss_D = (loss_D_real + loss_D_fake) * 0.5 + loss_D.backward() + return loss_D + + def backward_D_basic_cls(self, netD, real, fake): + # Real + pred_real, pred_real_cls = netD(real) + loss_D_real = self.criterionGAN(pred_real, True) + if not self.opt.style_loss_with_weight: + loss_D_real_cls = self.criterionCls(pred_real_cls, self.real_B_label) + else: + loss_D_real_cls = torch.mean(self.real_B_style0[:,0] * self.criterionCls2(pred_real_cls, self.zero) + self.real_B_style0[:,1] * self.criterionCls2(pred_real_cls, self.one) + self.real_B_style0[:,2] * self.criterionCls2(pred_real_cls, self.two)) + # Fake + pred_fake, pred_fake_cls = netD(fake.detach()) + loss_D_fake = self.criterionGAN(pred_fake, False) + if not self.opt.style_loss_with_weight: + loss_D_fake_cls = self.criterionCls(pred_fake_cls, self.real_B_label) + else: + loss_D_fake_cls = torch.mean(self.real_B_style0[:,0] * self.criterionCls2(pred_fake_cls, self.zero) + self.real_B_style0[:,1] * self.criterionCls2(pred_fake_cls, self.one) + self.real_B_style0[:,2] * self.criterionCls2(pred_fake_cls, self.two)) + # Combined loss and calculate gradients + loss_D = (loss_D_real + loss_D_fake) * 0.5 + loss_D_cls = (loss_D_real_cls + loss_D_fake_cls) * 0.5 + loss_D_total = loss_D + loss_D_cls + loss_D_total.backward() + return loss_D, loss_D_cls + + def backward_D_A(self): + """Calculate GAN loss for discriminator D_A""" + fake_B = self.fake_B_pool.query(self.fake_B) + self.loss_D_A, self.loss_D_A_cls = self.backward_D_basic_cls(self.netD_A, self.real_B, fake_B) + + def backward_D_A_l(self): + """Calculate GAN loss for discriminator D_A_l""" + fake_B = self.fake_B_pool.query(self.fake_B) + self.loss_D_A_l = self.backward_D_basic(self.netD_A_l, self.masked(self.real_B,self.B_mask), self.masked(fake_B,self.A_mask)) + + def backward_D_A_le(self): + """Calculate GAN loss for discriminator D_A_le""" + fake_B = self.fake_B_pool.query(self.fake_B) + self.loss_D_A_le = self.backward_D_basic(self.netD_A_le, self.masked(self.real_B,self.B_maske), self.masked(fake_B,self.A_maske)) + + def backward_D_A_ll(self): + """Calculate GAN loss for discriminator D_A_ll""" + fake_B = self.fake_B_pool.query(self.fake_B) + self.loss_D_A_ll = self.backward_D_basic(self.netD_A_ll, self.masked(self.real_B,self.B_maskl), self.masked(fake_B,self.A_maskl)) + + def backward_D_B(self): + """Calculate GAN loss for discriminator D_B""" + fake_A = self.fake_A_pool.query(self.fake_A) + self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A) + + def update_process(self, epoch): + self.process = (epoch - 1) / float(self.opt.niter_decay + self.opt.niter) + + def backward_G(self): + """Calculate the loss for generators G_A and G_B""" + lambda_idt = self.opt.lambda_identity + lambda_G_A_l = self.opt.lambda_G_A_l + lambda_A = self.opt.lambda_A + lambda_B = self.opt.lambda_B + lambda_A_trunc = self.opt.lambda_A_trunc + if self.opt.ntrunc_trunc: + lambda_A = lambda_A * (1 - self.process * 0.9) + lambda_A_trunc = lambda_A_trunc * self.process * 0.9 + self.lambda_As = [lambda_A, lambda_A_trunc] + # Identity loss + if lambda_idt > 0: + # G_A should be identity if real_B is fed: ||G_A(B) - B|| + self.idt_A = self.netG_A(self.real_B) + self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt + # G_B should be identity 
if real_A is fed: ||G_B(A) - A|| + self.idt_B = self.netG_B(self.real_A) + self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt + else: + self.loss_idt_A = 0 + self.loss_idt_B = 0 + + # GAN loss D_A(G_A(A)) + pred_fake, pred_fake_cls = self.netD_A(self.fake_B) + self.loss_G_A = self.criterionGAN(pred_fake, True) + if not self.opt.style_loss_with_weight: + self.loss_G_A_cls = self.criterionCls(pred_fake_cls, self.real_B_label) + else: + self.loss_G_A_cls = torch.mean(self.real_B_style0[:,0] * self.criterionCls2(pred_fake_cls, self.zero) + self.real_B_style0[:,1] * self.criterionCls2(pred_fake_cls, self.one) + self.real_B_style0[:,2] * self.criterionCls2(pred_fake_cls, self.two)) + if self.opt.use_mask: + self.loss_G_A_l = self.criterionGAN(self.netD_A_l(self.fake_B_l), True) * lambda_G_A_l + if self.opt.use_eye_mask: + self.loss_G_A_le = self.criterionGAN(self.netD_A_le(self.fake_B_le), True) * lambda_G_A_l + if self.opt.use_lip_mask: + self.loss_G_A_ll = self.criterionGAN(self.netD_A_ll(self.fake_B_ll), True) * lambda_G_A_l + # GAN loss D_B(G_B(B)) + self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True) + # Forward cycle loss || G_B(G_A(A)) - A|| + if self.opt.perceptual_cycle == 0: + self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A + if self.opt.ntrunc_trunc: + self.rec_At = self.netG_B(truncate(self.fake_B,self.opt.trunc_a)) + self.loss_cycle_A2 = self.criterionCycle(self.rec_At, self.real_A) * lambda_A_trunc + else: + if self.opt.perceptual_cycle == 1: + self.loss_cycle_A = self.lpips.forward_pair(self.rec_A, self.real_A).mean() * lambda_A + if self.opt.ntrunc_trunc: + self.rec_At = self.netG_B(truncate(self.fake_B,self.opt.trunc_a)) + self.loss_cycle_A2 = self.lpips.forward_pair(self.rec_At, self.real_A).mean() * lambda_A_trunc + elif self.opt.perceptual_cycle == 2: + ts = self.real_A.shape + rec_A = (self.rec_A[:,0,:,:]*0.299+self.rec_A[:,1,:,:]*0.587+self.rec_A[:,2,:,:]*0.114).unsqueeze(0) + real_A = (self.real_A[:,0,:,:]*0.299+self.real_A[:,1,:,:]*0.587+self.real_A[:,2,:,:]*0.114).unsqueeze(0) + self.loss_cycle_A = self.lpips.forward_pair(rec_A.expand(ts), real_A.expand(ts)).mean() * lambda_A + elif self.opt.perceptual_cycle == 3 and self.opt.use_hed: + ts = self.real_A.shape + #[-1,1]->[0,1]->[-1,1] + rec_A_hed = (self.hed(self.rec_A/2+0.5)-0.5)*2 + real_A_hed = (self.hed(self.real_A/2+0.5)-0.5)*2 + self.loss_cycle_A = self.lpips.forward_pair(rec_A_hed.expand(ts), real_A_hed.expand(ts)).mean() * lambda_A + self.rec_A_hed = rec_A_hed + self.real_A_hed = real_A_hed + print(lambda_A) + elif self.opt.perceptual_cycle == 4: + x_a_feature = self.vgg(self.real_A) + g_a_feature = self.vgg(self.rec_A) + self.loss_cycle_A = self.criterionCycle(g_a_feature, x_a_feature.detach()) * lambda_A + elif self.opt.perceptual_cycle == 5 and self.opt.use_hed: + ts = self.real_A.shape + rec_A_hed = (self.hed(self.rec_A/2+0.5)-0.5)*2 + real_A_hed = (self.hed(self.real_A/2+0.5)-0.5)*2 + x_a_feature = self.vgg(real_A_hed.expand(ts)) + g_a_feature = self.vgg(rec_A_hed.expand(ts)) + self.loss_cycle_A = self.criterionCycle(g_a_feature, x_a_feature.detach()) * lambda_A + self.rec_A_hed = rec_A_hed + self.real_A_hed = real_A_hed + elif self.opt.perceptual_cycle == 6 and self.opt.use_hed and self.opt.ntrunc_trunc: + ts = self.real_A.shape + gpu_p = self.opt.gpu_ids_p[0] + gpu = self.opt.gpu_ids[0] + rec_A_hed = (self.hed(self.rec_A.cuda(gpu_p)/2+0.5)-0.5)*2 + real_A_hed = (self.hed(self.real_A.cuda(gpu_p)/2+0.5)-0.5)*2 + self.rec_At = 
self.netG_B(truncate(self.fake_B,self.opt.trunc_a)) + rec_At_hed = (self.hed(self.rec_At.cuda(gpu_p)/2+0.5)-0.5)*2 + self.loss_cycle_A = (self.lpips.forward_pair(rec_A_hed.expand(ts), real_A_hed.expand(ts)).mean()).cuda(gpu) * lambda_A + self.loss_cycle_A2 = (self.lpips.forward_pair(rec_At_hed.expand(ts), real_A_hed.expand(ts)).mean()).cuda(gpu) * lambda_A_trunc + self.rec_A_hed = rec_A_hed + self.real_A_hed = real_A_hed + self.rec_At_hed = rec_At_hed + elif self.opt.perceptual_cycle == 8 and self.opt.use_hed and self.opt.ntrunc_trunc: + ts = self.real_A.shape + rec_A_hed = (self.hed(self.rec_A/2+0.5)-0.5)*2 + real_A_hed = (self.hed(self.real_A/2+0.5)-0.5)*2 + self.rec_At = self.netG_B(truncate(self.fake_B,self.opt.trunc_a)) + rec_At_hed = (self.hed(self.rec_At/2+0.5)-0.5)*2 + x_a_feature = self.vgg(real_A_hed.expand(ts)) + g_a_feature = self.vgg(rec_A_hed.expand(ts)) + gt_a_feature = self.vgg(rec_At_hed.expand(ts)) + self.loss_cycle_A = self.criterionCycle(g_a_feature, x_a_feature.detach()) * lambda_A + self.loss_cycle_A2 = self.criterionCycle(gt_a_feature, x_a_feature.detach()) * lambda_A_trunc + self.rec_A_hed = rec_A_hed + self.real_A_hed = real_A_hed + self.rec_At_hed = rec_At_hed + + # Backward cycle loss || G_A(G_B(B)) - B|| + self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B + + # Metric loss, metric higher better + if self.opt.metric: + self.fake_B2 = self.fake_B.clone() + if self.opt.metric_inmask: + # background black + #self.fake_B2 = (self.fake_B2/2+0.5)*self.A_maskfg*2-1 + # background white + self.fake_B2 = ((self.fake_B2/2+0.5)*self.A_maskfg+1-self.A_maskfg)*2-1 + if not self.opt.metric_resnext and not self.opt.metric_resnet: # for two version of inception (during training input is [-1,1]) + self.fake_B2 = torch.nn.functional.interpolate(input=self.fake_B2, size=(299, 299), mode='bilinear', align_corners=False) + self.fake_B2 = self.fake_B2.repeat(1,3,1,1) + else: # for resnet and resnext + self.fake_B2 = torch.nn.functional.interpolate(input=self.fake_B2, size=(224, 224), mode='bilinear', align_corners=False) + x = self.fake_B2.repeat(1,3,1,1) + # [-1,1] -> [0,1] -> mean [0.485,0.456,0.406], std [0.229,0.224,0.225] + x_ch0 = (torch.unsqueeze(x[:, 0],1)*0.5+0.5-0.485)/0.229 + x_ch1 = (torch.unsqueeze(x[:, 1],1)*0.5+0.5-0.456)/0.224 + x_ch2 = (torch.unsqueeze(x[:, 2],1)*0.5+0.5-0.406)/0.225 + self.fake_B2 = torch.cat((x_ch0, x_ch1, x_ch2, x[:, 3:]), 1) + + + if not self.opt.metricvec: + pred = self.metric(self.fake_B2) + else: + pred = self.metric(torch.cat((self.fake_B2, self.vec),1)) + self.loss_metric = torch.mean((1-pred)) * self.opt.lambda_metric + + # combined loss and calculate gradients + self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B + if getattr(self,'loss_cycle_A2',-1) != -1: + self.loss_G = self.loss_G + self.loss_cycle_A2 + if getattr(self,'loss_G_A_l',-1) != -1: + self.loss_G = self.loss_G + self.loss_G_A_l + if getattr(self,'loss_G_A_le',-1) != -1: + self.loss_G = self.loss_G + self.loss_G_A_le + if getattr(self,'loss_G_A_ll',-1) != -1: + self.loss_G = self.loss_G + self.loss_G_A_ll + if getattr(self,'loss_G_A_cls',-1) != -1: + self.loss_G = self.loss_G + self.loss_G_A_cls + if getattr(self,'loss_metric',-1) != -1: + self.loss_G = self.loss_G + self.loss_metric + self.loss_G.backward() + + def optimize_parameters(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + # forward + self.forward() # compute fake 
images and reconstruction images.
+        # G_A and G_B
+        self.set_requires_grad([self.netD_A, self.netD_B], False)  # Ds require no gradients when optimizing Gs
+        if self.opt.use_mask:
+            self.set_requires_grad([self.netD_A_l], False)
+        if self.opt.use_eye_mask:
+            self.set_requires_grad([self.netD_A_le], False)
+        if self.opt.use_lip_mask:
+            self.set_requires_grad([self.netD_A_ll], False)
+        self.optimizer_G.zero_grad()  # set G_A and G_B's gradients to zero
+        self.backward_G()             # calculate gradients for G_A and G_B
+        self.optimizer_G.step()       # update G_A and G_B's weights
+        # D_A and D_B
+        self.set_requires_grad([self.netD_A, self.netD_B], True)
+        if self.opt.use_mask:
+            self.set_requires_grad([self.netD_A_l], True)
+        if self.opt.use_eye_mask:
+            self.set_requires_grad([self.netD_A_le], True)
+        if self.opt.use_lip_mask:
+            self.set_requires_grad([self.netD_A_ll], True)
+        self.optimizer_D.zero_grad()  # set D_A and D_B's gradients to zero
+        self.backward_D_A()           # calculate gradients for D_A
+        if self.opt.use_mask:
+            self.backward_D_A_l()     # calculate gradients for D_A_l
+        if self.opt.use_eye_mask:
+            self.backward_D_A_le()    # calculate gradients for D_A_le
+        if self.opt.use_lip_mask:
+            self.backward_D_A_ll()    # calculate gradients for D_A_ll
+        self.backward_D_B()           # calculate gradients for D_B
+        self.optimizer_D.step()       # update D_A and D_B's weights
diff --git a/robot_painting/qmupd_vs/models/dist_model.py b/robot_painting/qmupd_vs/models/dist_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..e61d5de0214978ef071cb520dcbed77882c59836
--- /dev/null
+++ b/robot_painting/qmupd_vs/models/dist_model.py
@@ -0,0 +1,323 @@
+
+from __future__ import absolute_import
+
+import sys
+sys.path.append('..')
+sys.path.append('.')
+import numpy as np
+import torch
+from torch import nn
+from collections import OrderedDict
+from torch.autograd import Variable
+from .base_model import BaseModel
+from scipy.ndimage import zoom
+import skimage.transform
+
+from . import networks_basic as networks
+# from PerceptualSimilarity.util import util
+from util import util
+
+class DistModel(BaseModel):
+    def name(self):
+        return self.model_name
+
+    def __init__(self, opt, model='net-lin', net='alex', pnet_rand=False, pnet_tune=False, model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5, version='0.1'):
+        '''
+        INPUTS
+            model - ['net-lin'] for linearly calibrated network
+                    ['net'] for off-the-shelf network
+                    ['L2'] for L2 distance in Lab colorspace
+                    ['SSIM'] for ssim in RGB colorspace
+            net - ['squeeze','alex','vgg']
+            model_path - if None, will look in weights/[NET_NAME].pth
+            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
+            use_gpu - bool - whether or not to use a GPU
+            printNet - bool - whether or not to print network architecture out
+            spatial - bool - whether to output an array containing varying distances across spatial dimensions
+            spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
+            spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
+            spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
+ is_train - bool - [True] for training mode + lr - float - initial learning rate + beta1 - float - initial momentum term for adam + version - 0.1 for latest, 0.0 was original + ''' + BaseModel.__init__(self, opt) + + self.model = model + self.net = net + self.use_gpu = use_gpu + self.is_train = is_train + self.spatial = spatial + self.spatial_shape = spatial_shape + self.spatial_order = spatial_order + self.spatial_factor = spatial_factor + + self.model_name = '%s [%s]'%(model,net) + if(self.model == 'net-lin'): # pretrained net + linear layer + #self.device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu') + self.device = torch.device('cuda:{}'.format(opt.gpu_ids_p[0])) if opt.gpu_ids_p else torch.device('cpu') + self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,use_dropout=True,spatial=spatial,version=version,lpips=True).to(self.device) + kw = {} + + if not use_gpu: + kw['map_location'] = 'cpu' + if(model_path is None): + import inspect + #model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', '..', 'weights/v%s/%s.pth'%(version,net))) + model_path = './checkpoints/weights/v%s/%s.pth'%(version,net) + + if(not is_train): + print('Loading model from: %s'%model_path) + #self.net.load_state_dict(torch.load(model_path, **kw)) + state_dict = torch.load(model_path, map_location=str(self.device)) + self.net.load_state_dict(state_dict, strict=False) + + elif(self.model=='net'): # pretrained network + assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks' + self.net = networks.PNet(use_gpu=use_gpu,pnet_type=net,device=self.device) + self.is_fake_net = True + elif(self.model in ['L2','l2']): + self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace,device=self.device) # not really a network, only for testing + self.model_name = 'L2' + elif(self.model in ['DSSIM','dssim','SSIM','ssim']): + self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace,device=self.device) + self.model_name = 'SSIM' + else: + raise ValueError("Model [%s] not recognized." 
% self.model) + + self.parameters = list(self.net.parameters()) + + if self.is_train: # training mode + # extra network on top to go from distances (d0,d1) => predicted human judgment (h*) + self.rankLoss = networks.BCERankingLoss(use_gpu=use_gpu,device=self.device) + self.parameters+=self.rankLoss.parameters + self.lr = lr + self.old_lr = lr + self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999)) + else: # test mode + self.net.eval() + + if(printNet): + print('---------- Networks initialized -------------') + networks.print_network(self.net) + print('-----------------------------------------------') + + def forward_pair(self,in1,in2,retPerLayer=False): + if(retPerLayer): + return self.net.forward(in1,in2, retPerLayer=True) + else: + return self.net.forward(in1,in2) + + def forward(self, in0, in1, retNumpy=False): + ''' Function computes the distance between image patches in0 and in1 + INPUTS + in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1] + retNumpy - [False] to return as torch.Tensor, [True] to return as numpy array + OUTPUT + computed distances between in0 and in1 + ''' + + self.input_ref = in0 + self.input_p0 = in1 + + self.var_ref = Variable(self.input_ref,requires_grad=True) + self.var_p0 = Variable(self.input_p0,requires_grad=True) + + self.d0 = self.forward_pair(self.var_ref, self.var_p0) + self.loss_total = self.d0 + + def convert_output(d0): + if(retNumpy): + ans = d0.cpu().data.numpy() + if not self.spatial: + ans = ans.flatten() + else: + assert(ans.shape[0] == 1 and len(ans.shape) == 4) + return ans[0,...].transpose([1, 2, 0]) # Reshape to usual numpy image format: (height, width, channels) + return ans + else: + return d0 + + if self.spatial: + L = [convert_output(x) for x in self.d0] + spatial_shape = self.spatial_shape + if spatial_shape is None: + if(self.spatial_factor is None): + spatial_shape = (in0.size()[2],in0.size()[3]) + else: + spatial_shape = (max([x.shape[0] for x in L])*self.spatial_factor, max([x.shape[1] for x in L])*self.spatial_factor) + + L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L] + + L = np.mean(np.concatenate(L, 2) * len(L), 2) + return L + else: + return convert_output(self.d0) + + # ***** TRAINING FUNCTIONS ***** + def optimize_parameters(self): + self.forward_train() + self.optimizer_net.zero_grad() + self.backward_train() + self.optimizer_net.step() + self.clamp_weights() + + def clamp_weights(self): + for module in self.net.modules(): + if(hasattr(module, 'weight') and module.kernel_size==(1,1)): + module.weight.data = torch.clamp(module.weight.data,min=0) + + def set_input(self, data): + self.input_ref = data['ref'] + self.input_p0 = data['p0'] + self.input_p1 = data['p1'] + self.input_judge = data['judge'] + + if(self.use_gpu): + self.input_ref = self.input_ref.cuda(self.device) + self.input_p0 = self.input_p0.cuda(self.device) + self.input_p1 = self.input_p1.cuda(self.device) + self.input_judge = self.input_judge.cuda(self.device) + + self.var_ref = Variable(self.input_ref,requires_grad=True) + self.var_p0 = Variable(self.input_p0,requires_grad=True) + self.var_p1 = Variable(self.input_p1,requires_grad=True) + + def forward_train(self): # run forward pass + self.d0 = self.forward_pair(self.var_ref, self.var_p0) + self.d1 = self.forward_pair(self.var_ref, self.var_p1) + self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge) + + # var_judge + self.var_judge = Variable(1.*self.input_judge).view(self.d0.size()) + + 
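+        # judge lies in [0,1] (the fraction of human raters preferring patch p1),
+        # so var_judge*2.-1. below rescales it to a signed target in [-1,1] for
+        # the BCE-based ranking loss.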
+        self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)
+        return self.loss_total
+
+    def backward_train(self):
+        torch.mean(self.loss_total).backward()
+
+    def compute_accuracy(self,d0,d1,judge):
+        ''' d0, d1 are Variables, judge is a Tensor '''
+        d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
+        judge_per = judge.cpu().numpy().flatten()
+        return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)
+
+    def get_current_errors(self):
+        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
+                               ('acc_r', self.acc_r)])
+        for key in retDict.keys():
+            retDict[key] = np.mean(retDict[key])
+        return retDict
+
+    def get_current_visuals(self):
+        zoom_factor = 256/self.var_ref.data.size()[2]
+        ref_img = util.tensor2im(self.var_ref.data)
+        p0_img = util.tensor2im(self.var_p0.data)
+        p1_img = util.tensor2im(self.var_p1.data)
+        ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
+        p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
+        p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)
+        return OrderedDict([('ref', ref_img_vis),
+                            ('p0', p0_img_vis),
+                            ('p1', p1_img_vis)])
+
+    def save(self, path, label):
+        if(self.use_gpu):
+            self.save_network(self.net.module, path, '', label)
+        else:
+            self.save_network(self.net, path, '', label)
+        self.save_network(self.rankLoss.net, path, 'rank', label)
+
+    def update_learning_rate(self,nepoch_decay):
+        lrd = self.lr / nepoch_decay
+        lr = self.old_lr - lrd
+        for param_group in self.optimizer_net.param_groups:
+            param_group['lr'] = lr
+        print('update lr [%s] decay: %f -> %f' % (type,self.old_lr, lr))
+        self.old_lr = lr
+
+
+
+def score_2afc_dataset(data_loader,func):
+    ''' Function computes Two Alternative Forced Choice (2AFC) score using
+        distance function 'func' in dataset 'data_loader'
+    INPUTS
+        data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
+        func - callable distance function - calling d=func(in0,in1) should take 2
+            pytorch tensors with shape Nx3xXxY, and return numpy array of length N
+    OUTPUTS
+        [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
+        [1] - dictionary with following elements
+            d0s,d1s - N arrays containing distances between reference patch to perturbed patches
+            gts - N array in [0,1], preferred patch selected by human evaluators
+                (closer to "0" for left patch p0, "1" for right patch p1,
+                "0.6" means 60pct people preferred right patch, 40pct preferred left)
+            scores - N array in [0,1], corresponding to what percentage function agreed with humans
+    CONSTS
+        N - number of test triplets in data_loader
+    '''
+
+    d0s = []
+    d1s = []
+    gts = []
+
+    # bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())
+    for (i,data) in enumerate(data_loader.load_data()):
+        d0s+=func(data['ref'],data['p0']).tolist()
+        d1s+=func(data['ref'],data['p1']).tolist()
+        gts+=data['judge'].cpu().numpy().flatten().tolist()
+        # bar.update(i)
+
+    d0s = np.array(d0s)
+    d1s = np.array(d1s)
+    gts = np.array(gts)
+    scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5
+
+    return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))
diff --git a/robot_painting/qmupd_vs/models/networks.py b/robot_painting/qmupd_vs/models/networks.py
new file mode 100644
--- /dev/null
+++ b/robot_painting/qmupd_vs/models/networks.py
+import torch
+import torch.nn as nn
+from torch.nn import init
+import functools
+from torch.optim import lr_scheduler
+
+###############################################################################
+# Helper Functions
+###############################################################################
+
+
+class Identity(nn.Module):
+    def forward(self, x):
+        return x
+
+
+def get_norm_layer(norm_type='instance'):
+    """Return a normalization layer
+
+    Parameters:
+        norm_type (str) -- the name of the normalization layer: batch | instance | none
+
+    For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
+    For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
+    """
+    if norm_type == 'batch':
+        norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
+    elif norm_type == 'instance':
+        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
+    elif norm_type == 'none':
+        norm_layer = lambda x: Identity()
+    else:
+        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
+    return norm_layer
+
+
+def get_scheduler(optimizer, opt):
+    """Return a learning rate scheduler
+
+    Parameters:
+        optimizer          -- the optimizer of the network
+        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
+                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
+
+    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
+    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
+    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
+    See https://pytorch.org/docs/stable/optim.html for more details.
+    """
+    if opt.lr_policy == 'linear':
+        def lambda_rule(epoch):
+            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
+            return lr_l
+        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
+    elif opt.lr_policy == 'step':
+        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
+    elif opt.lr_policy == 'plateau':
+        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
+    elif opt.lr_policy == 'cosine':
+        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
+    else:
+        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
+    return scheduler
+
+
+def init_weights(net, init_type='normal', init_gain=0.02):
+    """Initialize network weights.
+
+    Parameters:
+        net (network)     -- network to be initialized
+        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
+        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
+
+    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
+    work better for some applications. Feel free to try yourself.
+ """ + def init_func(m): # define the initialization function + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, init_gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=init_gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=init_gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies. + init.normal_(m.weight.data, 1.0, init_gain) + init.constant_(m.bias.data, 0.0) + + print('initialize network with %s' % init_type) + net.apply(init_func) # apply the initialization function + + +def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]): + """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights + Parameters: + net (network) -- the network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Return an initialized network. + """ + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + net.to(gpu_ids[0]) + net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs + init_weights(net, init_type, init_gain=init_gain) + return net + + +def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[], model0_res=0, model1_res=0, extra_channel=3): + """Create a generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128 + norm (str) -- the name of normalization layers used in the network: batch | instance | none + use_dropout (bool) -- if use dropout layers. + init_type (str) -- the name of our initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a generator + + Our current implementation provides two types of generators: + U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images) + The original U-Net paper: https://arxiv.org/abs/1505.04597 + + Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks) + Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations. + We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style). + + + The generator has been initialized by . It uses RELU for non-linearity. 
+ """ + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netG == 'resnet_9blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9) + elif netG == 'resnet_8blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=8) + elif netG == 'resnet_style_9blocks': + net = ResnetStyleGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, extra_channel=extra_channel) + elif netG == 'resnet_style2_9blocks': + net = ResnetStyle2Generator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, model0_res=model0_res, extra_channel=extra_channel) + elif netG == 'resnet_style2_8blocks': + net = ResnetStyle2Generator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=8, model0_res=model0_res, extra_channel=extra_channel) + elif netG == 'resnet_style2_10blocks': + net = ResnetStyle2Generator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=10, model0_res=model0_res, extra_channel=extra_channel) + elif netG == 'resnet_style3decoder_9blocks': + net = ResnetStyle3DecoderGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, model0_res=model0_res) + elif netG == 'resnet_style2mc_9blocks': + net = ResnetStyle2MCGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, model0_res=model0_res, extra_channel=extra_channel) + elif netG == 'resnet_style2mc2_9blocks': + net = ResnetStyle2MC2Generator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, model0_res=model0_res, model1_res=model1_res, extra_channel=extra_channel) + elif netG == 'resnet_6blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6) + elif netG == 'unet_128': + net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'unet_256': + net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + else: + raise NotImplementedError('Generator model name [%s] is not recognized' % netG) + return init_net(net, init_type, init_gain, gpu_ids) + + +def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[], n_class=3): + """Create a discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the first conv layer + netD (str) -- the architecture's name: basic | n_layers | pixel + n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers' + norm (str) -- the type of normalization layers used in the network. + init_type (str) -- the name of the initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a discriminator + + Our current implementation provides three types of discriminators: + [basic]: 'PatchGAN' classifier described in the original pix2pix paper. + It can classify whether 70×70 overlapping patches are real or fake. + Such a patch-level discriminator architecture has fewer parameters + than a full-image discriminator and can work on arbitrarily-sized images + in a fully convolutional fashion. 
+
+        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
+        with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
+
+        [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
+        It encourages greater color diversity but has no effect on spatial statistics.
+
+    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
+    """
+    net = None
+    norm_layer = get_norm_layer(norm_type=norm)
+
+    if netD == 'basic':  # default PatchGAN classifier
+        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
+    elif netD == 'basic_cls':
+        net = NLayerDiscriminatorCls(input_nc, ndf, n_layers=3, n_class=3, norm_layer=norm_layer)
+    elif netD == 'n_layers':  # more options
+        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
+    elif netD == 'pixel':     # classify if each pixel is real or fake
+        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
+    else:
+        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
+    return init_net(net, init_type, init_gain, gpu_ids)
+
+
+def define_HED(init_weights_, gpu_ids_=[]):
+    net = HED()
+
+    if len(gpu_ids_) > 0:
+        assert(torch.cuda.is_available())
+        net.to(gpu_ids_[0])
+        net = torch.nn.DataParallel(net, gpu_ids_)  # multi-GPUs
+
+    if not init_weights_ == None:
+        device = torch.device('cuda:{}'.format(gpu_ids_[0])) if gpu_ids_ else torch.device('cpu')
+        print('Loading model from: %s'%init_weights_)
+        state_dict = torch.load(init_weights_, map_location=str(device))
+        if isinstance(net, torch.nn.DataParallel):
+            net.module.load_state_dict(state_dict)
+        else:
+            net.load_state_dict(state_dict)
+        print('load the weights successfully')
+
+    return net
+
+def define_VGG(init_weights_, feature_mode_, batch_norm_=False, num_classes_=1000, gpu_ids_=[]):
+    net = VGG19(init_weights=init_weights_, feature_mode=feature_mode_, batch_norm=batch_norm_, num_classes=num_classes_)
+    # set the GPU
+    if len(gpu_ids_) > 0:
+        assert(torch.cuda.is_available())
+        net.cuda(gpu_ids_[0])
+        net = torch.nn.DataParallel(net, gpu_ids_)  # multi-GPUs
+
+    if not init_weights_ == None:
+        device = torch.device('cuda:{}'.format(gpu_ids_[0])) if gpu_ids_ else torch.device('cpu')
+        print('Loading model from: %s'%init_weights_)
+        state_dict = torch.load(init_weights_, map_location=str(device))
+        if isinstance(net, torch.nn.DataParallel):
+            net.module.load_state_dict(state_dict)
+        else:
+            net.load_state_dict(state_dict)
+        print('load the weights successfully')
+    return net
+
+###################################################################################################################
+from torchvision.models import vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19, vgg19_bn
+def define_vgg11_bn(gpu_ids_=[],vec=0):
+    net = vgg11_bn(pretrained=True)
+    net.classifier[6] = nn.Linear(4096, 1)  #LSGAN needs no sigmoid, LSGAN-nn.MSELoss()
+    if len(gpu_ids_) > 0:
+        assert(torch.cuda.is_available())
+        net.cuda(gpu_ids_[0])
+        net = torch.nn.DataParallel(net, gpu_ids_)
+    return net
+def define_vgg19_bn(gpu_ids_=[],vec=0):
+    net = vgg19_bn(pretrained=True)
+    net.classifier[6] = nn.Linear(4096, 1)  #LSGAN needs no sigmoid, LSGAN-nn.MSELoss()
+    if len(gpu_ids_) > 0:
+        assert(torch.cuda.is_available())
+        net.cuda(gpu_ids_[0])
+        net = torch.nn.DataParallel(net, gpu_ids_)
+    return net
+def define_vgg19(gpu_ids_=[],vec=0):
+    net = vgg19(pretrained=True)
+    net.classifier[6] = nn.Linear(4096, 1)  #LSGAN needs no sigmoid, LSGAN-nn.MSELoss()
+    if len(gpu_ids_) >
0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + net = torch.nn.DataParallel(net, gpu_ids_) + return net +################################################################################################################### +from torchvision.models import resnet18, resnet34, resnet50, resnet101, resnet152 +def define_resnet101(gpu_ids_=[],vec=0): + net = resnet101(pretrained=True) + num_ftrs = net.fc.in_features + net.fc = nn.Linear(num_ftrs, 1) #LSGAN needs no sigmoid, LSGAN-nn.MSELoss() + if len(gpu_ids_) > 0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + net = torch.nn.DataParallel(net, gpu_ids_) + return net +def define_resnet101a(init_weights_,gpu_ids_=[],vec=0): + net = resnet101(pretrained=True) + num_ftrs = net.fc.in_features + net.fc = nn.Linear(num_ftrs, 1) #LSGAN needs no sigmoid, LSGAN-nn.MSELoss() + if not init_weights_ == None: + print('Loading model from: %s'%init_weights_) + state_dict = torch.load(init_weights_, map_location=str(torch.device('cpu'))) + if isinstance(net, torch.nn.DataParallel): + net.module.load_state_dict(state_dict) + else: + net.load_state_dict(state_dict) + print('load the weights successfully') + if len(gpu_ids_) > 0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + net = torch.nn.DataParallel(net, gpu_ids_) + return net +################################################################################################################### +import pretrainedmodels.models.resnext as resnext +def define_resnext101(gpu_ids_=[],vec=0): + net = resnext.resnext101_64x4d(num_classes=1000,pretrained='imagenet') + net.last_linear = nn.Linear(2048, 1) #LSGAN needs no sigmoid, LSGAN-nn.MSELoss() + if len(gpu_ids_) > 0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + net = torch.nn.DataParallel(net, gpu_ids_) + return net +def define_resnext101a(init_weights_,gpu_ids_=[],vec=0): + net = resnext.resnext101_64x4d(num_classes=1000,pretrained='imagenet') + net.last_linear = nn.Linear(2048, 1) #LSGAN needs no sigmoid, LSGAN-nn.MSELoss() + if not init_weights_ == None: + print('Loading model from: %s'%init_weights_) + state_dict = torch.load(init_weights_, map_location=str(torch.device('cpu'))) + if isinstance(net, torch.nn.DataParallel): + net.module.load_state_dict(state_dict) + else: + net.load_state_dict(state_dict) + print('load the weights successfully') + if len(gpu_ids_) > 0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + net = torch.nn.DataParallel(net, gpu_ids_) + return net +################################################################################################################### +from torchvision.models import Inception3, inception_v3 +def define_inception3(gpu_ids_=[],vec=0): + net = inception_v3(pretrained=True) + net.transform_input = False # assume [-1,1] input + net.fc = nn.Linear(2048, 1) + net.aux_logits = False + if len(gpu_ids_) > 0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + net = torch.nn.DataParallel(net, gpu_ids_) + return net +def define_inception3a(init_weights_,gpu_ids_=[],vec=0): + net = inception_v3(pretrained=True) + net.transform_input = False # assume [-1,1] input + net.fc = nn.Linear(2048, 1) + net.aux_logits = False + if not init_weights_ == None: + print('Loading model from: ', init_weights_) + state_dict = torch.load(init_weights_, map_location=str(torch.device('cpu'))) + if isinstance(net, torch.nn.DataParallel): + net.module.load_state_dict(state_dict) + else: + net.load_state_dict(state_dict) + print('load the weights successfully') + 
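+    # the state dict above is loaded onto the CPU (map_location=torch.device('cpu'));
+    # the model is only moved to the GPU and wrapped in DataParallel below.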
if len(gpu_ids_) > 0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + net = torch.nn.DataParallel(net, gpu_ids_) + return net +################################################################################################################### +from torchvision.models.inception import BasicConv2d +def define_inception_v3(init_weights_,gpu_ids_=[],vec=0): + + ## pretrained = True + kwargs = {} + if 'transform_input' not in kwargs: + kwargs['transform_input'] = True + if 'aux_logits' in kwargs: + original_aux_logits = kwargs['aux_logits'] + kwargs['aux_logits'] = True + else: + original_aux_logits = True + print(kwargs) + net = Inception3(**kwargs) + + if not init_weights_ == None: + print('Loading model from: %s'%init_weights_) + state_dict = torch.load(init_weights_, map_location=str(torch.device('cpu'))) + if isinstance(net, torch.nn.DataParallel): + net.module.load_state_dict(state_dict) + else: + net.load_state_dict(state_dict) + print('load the weights successfully') + + if not original_aux_logits: + net.aux_logits = False + del net.AuxLogits + + net.fc = nn.Linear(2048, 1) + if vec == 1: + net.Conv2d_1a_3x3 = BasicConv2d(6, 32, kernel_size=3, stride=2) + net.aux_logits = False + + if len(gpu_ids_) > 0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + net = torch.nn.DataParallel(net, gpu_ids_) + + return net + +def define_inception_v3a(init_weights_,gpu_ids_=[],vec=0): + + kwargs = {} + if 'transform_input' not in kwargs: + kwargs['transform_input'] = True + if 'aux_logits' in kwargs: + original_aux_logits = kwargs['aux_logits'] + kwargs['aux_logits'] = True + else: + original_aux_logits = True + print(kwargs) + net = Inception3(**kwargs) + + if not original_aux_logits: + net.aux_logits = False + del net.AuxLogits + + net.fc = nn.Linear(2048, 1) + if vec == 1: + net.Conv2d_1a_3x3 = BasicConv2d(6, 32, kernel_size=3, stride=2) + net.aux_logits = False + + if not init_weights_ == None: + print('Loading model from: %s'%init_weights_) + state_dict = torch.load(init_weights_, map_location=str(torch.device('cpu'))) + if isinstance(net, torch.nn.DataParallel): + net.module.load_state_dict(state_dict) + else: + net.load_state_dict(state_dict) + print('load the weights successfully') + + if len(gpu_ids_) > 0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + net = torch.nn.DataParallel(net, gpu_ids_) + + return net + +def define_inception_ori(init_weights_,transform_input=False,gpu_ids_=[]): + + ## pretrained = True + kwargs = {} + kwargs['transform_input'] = transform_input + + if 'aux_logits' in kwargs: + original_aux_logits = kwargs['aux_logits'] + kwargs['aux_logits'] = True + else: + original_aux_logits = True + print(kwargs) + net = Inception3(**kwargs) + + + if not init_weights_ == None: + print('Loading model from: %s'%init_weights_) + state_dict = torch.load(init_weights_, map_location=str(torch.device('cpu'))) + if isinstance(net, torch.nn.DataParallel): + net.module.load_state_dict(state_dict) + else: + net.load_state_dict(state_dict) + print('load the weights successfully') + #for e in list(net.modules()): + # print(e) + + if not original_aux_logits: + net.aux_logits = False + del net.AuxLogits + + + if len(gpu_ids_) > 0: + assert(torch.cuda.is_available()) + net.cuda(gpu_ids_[0]) + + return net +################################################################################################################### + +def define_DT(init_weights_, input_nc_, output_nc_, ngf_, netG_, norm_, use_dropout_, init_type_, init_gain_, gpu_ids_): + net = 
define_G(input_nc_, output_nc_, ngf_, netG_, norm_, use_dropout_, init_type_, init_gain_, gpu_ids_)
+
+    if not init_weights_ == None:
+        device = torch.device('cuda:{}'.format(gpu_ids_[0])) if gpu_ids_ else torch.device('cpu')
+        print('Loading model from: %s'%init_weights_)
+        state_dict = torch.load(init_weights_, map_location=str(device))
+        if isinstance(net, torch.nn.DataParallel):
+            net.module.load_state_dict(state_dict)
+        else:
+            net.load_state_dict(state_dict)
+        print('load the weights successfully')
+    return net
+
+def define_C(input_nc, classes, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[], h=512, w=512, nnG=3, dim=4096):
+    net = None
+    norm_layer = get_norm_layer(norm_type=norm)
+    if netG == 'classifier':
+        net = Classifier(input_nc, classes, ngf, num_downs=nnG, norm_layer=norm_layer, use_dropout=use_dropout, h=h, w=w, dim=dim)
+    elif netG == 'vgg':
+        net = VGG19(init_weights=None, feature_mode=False, batch_norm=True, num_classes=classes)
+    return init_net(net, init_type, init_gain, gpu_ids)
+
+##############################################################################
+# Classes
+##############################################################################
+class GANLoss(nn.Module):
+    """Define different GAN objectives.
+
+    The GANLoss class abstracts away the need to create the target label tensor
+    that has the same size as the input.
+    """
+
+    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
+        """ Initialize the GANLoss class.
+
+        Parameters:
+            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
+            target_real_label (float) - - label for a real image
+            target_fake_label (float) - - label of a fake image
+
+        Note: Do not use sigmoid as the last layer of Discriminator.
+        LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
+        """
+        super(GANLoss, self).__init__()
+        self.register_buffer('real_label', torch.tensor(target_real_label))
+        self.register_buffer('fake_label', torch.tensor(target_fake_label))
+        self.gan_mode = gan_mode
+        if gan_mode == 'lsgan':  #cyclegan
+            self.loss = nn.MSELoss()
+        elif gan_mode == 'vanilla':
+            self.loss = nn.BCEWithLogitsLoss()
+        elif gan_mode in ['wgangp']:
+            self.loss = None
+        else:
+            raise NotImplementedError('gan mode %s not implemented' % gan_mode)
+
+    def get_target_tensor(self, prediction, target_is_real):
+        """Create label tensors with the same size as the input.
+
+        Parameters:
+            prediction (tensor) - - typically the prediction from a discriminator
+            target_is_real (bool) - - if the ground truth label is for real images or fake images
+
+        Returns:
+            A label tensor filled with ground truth label, and with the size of the input
+        """
+
+        if target_is_real:
+            target_tensor = self.real_label
+        else:
+            target_tensor = self.fake_label
+        return target_tensor.expand_as(prediction)
+
+    def __call__(self, prediction, target_is_real):
+        """Calculate loss given Discriminator's output and ground truth labels.
+
+        Parameters:
+            prediction (tensor) - - typically the prediction output from a discriminator
+            target_is_real (bool) - - if the ground truth label is for real images or fake images
+
+        Returns:
+            the calculated loss.
+ """ + if self.gan_mode in ['lsgan', 'vanilla']: + target_tensor = self.get_target_tensor(prediction, target_is_real) + loss = self.loss(prediction, target_tensor) + elif self.gan_mode == 'wgangp': + if target_is_real: + loss = -prediction.mean() + else: + loss = prediction.mean() + return loss + + +def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0): + """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028 + + Arguments: + netD (network) -- discriminator network + real_data (tensor array) -- real images + fake_data (tensor array) -- generated images from the generator + device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') + type (str) -- if we mix real and fake data or not [real | fake | mixed]. + constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2 + lambda_gp (float) -- weight for this loss + + Returns the gradient penalty loss + """ + if lambda_gp > 0.0: + if type == 'real': # either use real images, fake images, or a linear interpolation of two. + interpolatesv = real_data + elif type == 'fake': + interpolatesv = fake_data + elif type == 'mixed': + alpha = torch.rand(real_data.shape[0], 1, device=device) + alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape) + interpolatesv = alpha * real_data + ((1 - alpha) * fake_data) + else: + raise NotImplementedError('{} not implemented'.format(type)) + interpolatesv.requires_grad_(True) + disc_interpolates = netD(interpolatesv) + gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv, + grad_outputs=torch.ones(disc_interpolates.size()).to(device), + create_graph=True, retain_graph=True, only_inputs=True) + gradients = gradients[0].view(real_data.size(0), -1) # flat the data + gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps + return gradient_penalty, gradients + else: + return 0.0, None + + +class ResnetGenerator(nn.Module): + """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. 
+ + We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetGenerator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input, feature_mode = False): + """Standard forward""" + if not feature_mode: + return self.model(input) + else: + module_list = list(self.model.modules()) + x = input.clone() + indexes = list(range(1,11))+[11,20,29,38,47,56,65,74,83]+list(range(92,101)) + for i in indexes: + x = module_list[i](x) + if i == 3: + x1 = x.clone() + elif i == 6: + x2 = x.clone() + elif i == 9: + x3 = x.clone() + elif i == 47: + y7 = x.clone() + elif i == 83: + y4 = x.clone() + elif i == 93: + y3 = x.clone() + elif i == 96: + y2 = x.clone() + #y = self.model(input) + #pdb.set_trace() + return x,x1,x2,x3,y4,y3,y2,y7 + +class ResnetStyleGenerator(nn.Module): + """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. 
+ + We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetStyleGenerator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model0 = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + model0 += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2 ** n_downsampling + model1 = [nn.Conv2d(3, ngf * mult, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult), + nn.ReLU(True)] + + model = [] + model += [nn.Conv2d(ngf * mult * 2, ngf * mult, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult), + nn.ReLU(True)] + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model0 = nn.Sequential(*model0) + self.model1 = nn.Sequential(*model1) + self.model = nn.Sequential(*model) + + def forward(self, input1, input2): + """Standard forward""" + f1 = self.model0(input1) + f2 = self.model1(input2) + #pdb.set_trace() + f1 = torch.cat((f1,f2), 1) + return self.model(f1) + + +class ResnetStyle2Generator(nn.Module): + """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. 
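+
+    Usage sketch (illustrative; the sizes are example assumptions -- input2 must carry
+    extra_channel channels at the 4x-downsampled resolution of input1, and in this repo
+    it is typically a spatially tiled one-hot style map):
+
+        >>> net = ResnetStyle2Generator(input_nc=3, output_nc=3, n_blocks=9, extra_channel=3)
+        >>> style = torch.zeros(1, 3, 64, 64); style[:, 0] = 1.0
+        >>> tuple(net(torch.randn(1, 3, 256, 256), style).shape)
+        (1, 3, 256, 256)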
+ + We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', extra_channel=3, model0_res=0): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetStyle2Generator, self).__init__() + self.n_blocks = n_blocks + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model0 = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + model0 += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2 ** n_downsampling + for i in range(model0_res): # add ResNet blocks + model0 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + model = [] + model += [nn.Conv2d(ngf * mult + extra_channel, ngf * mult, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult), + nn.ReLU(True)] + + for i in range(n_blocks-model0_res): # add ResNet blocks + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model0 = nn.Sequential(*model0) + self.model = nn.Sequential(*model) + #print(list(self.modules())) + + def forward(self, input1, input2, feature_mode=False, ablate_res=-1): + """Standard forward""" + if not feature_mode: + if ablate_res == -1: + f1 = self.model0(input1) + y1 = torch.cat([f1, input2], 1) + return self.model(y1) + else: + f1 = self.model0(input1) + y = torch.cat([f1, input2], 1) + module_list = list(self.model.modules()) + for i in range(1, 4):#merge module + y = module_list[i](y) + for k in range(self.n_blocks):#resblocks + if k+1 == ablate_res: + print('skip resblock'+str(k+1)) + continue + y1 = y.clone() + for i in range(6+9*k,13+9*k): + y = module_list[i](y) + y = y1 + y + for i in range(4+9*self.n_blocks,13+9*self.n_blocks):#up convs + y = module_list[i](y) + return y + else: + module_list0 = list(self.model0.modules()) + x = input1.clone() + for i in range(1,11): + x = module_list0[i](x) + if i == 3: + x1 = x.clone()#[1,64,512,512] + elif i == 6: + x2 = x.clone()#[1,128,256,256] + elif i == 9: + x3 = x.clone()#[1,256,128,128] + #f1 = self.model0(input1)#[1,256,128,128] + #pdb.set_trace() + y1 = torch.cat([x, 
input2], 1)#[1,259,128,128] + module_list = list(self.model.modules()) + indexes = list(range(1,4))+[4,13,22,31,40,49,58,67,76]+list(range(85,94)) + y = y1.clone() + for i in indexes: + y = module_list[i](y) + if i == 76: + y4 = y.clone()#[1,256,128,128] + elif i == 86: + y3 = y.clone()#[1,128,256,256] + elif i == 89: + y2 = y.clone()#[1,64,512,512] + elif i == 40: + y7 = y.clone() + #out = self.model(y1) + #pdb.set_trace() + return y,x1,x2,x3,y4,y3,y2,y7 + +class ResnetStyle3DecoderGenerator(nn.Module): + """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. + + We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', model0_res=0): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetStyle3DecoderGenerator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model0 = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + model0 += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2 ** n_downsampling + for i in range(model0_res): # add ResNet blocks + model0 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + model1 = [] + model2 = [] + model3 = [] + for i in range(n_blocks-model0_res): # add ResNet blocks + model1 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + model2 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + model3 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + model1 += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model2 += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model3 += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model1 += [nn.ReflectionPad2d(3)] + model1 += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model1 += [nn.Tanh()] + model2 += [nn.ReflectionPad2d(3)] + model2 += 
[nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model2 += [nn.Tanh()] + model3 += [nn.ReflectionPad2d(3)] + model3 += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model3 += [nn.Tanh()] + + self.model0 = nn.Sequential(*model0) + self.model1 = nn.Sequential(*model1) + self.model2 = nn.Sequential(*model2) + self.model3 = nn.Sequential(*model3) + print(list(self.modules())) + + def forward(self, input, domain): + """Standard forward""" + f1 = self.model0(input) + if domain == 0: + y = self.model1(f1) + elif domain == 1: + y = self.model2(f1) + elif domain == 2: + y = self.model3(f1) + return y + +class ResnetStyle2MCGenerator(nn.Module): + # multi-column + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', extra_channel=3, model0_res=0): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetStyle2MCGenerator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model0 = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + model1_3 = [] + model1_5 = [] + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + model1_3 += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + model1_5 += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=5, stride=2, padding=2, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2 ** n_downsampling + for i in range(model0_res): # add ResNet blocks + model1_3 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + model1_5 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, kernel=5)] + + model = [] + model += [nn.Conv2d(ngf * mult * 2 + extra_channel, ngf * mult, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult), + nn.ReLU(True)] + + for i in range(n_blocks-model0_res): # add ResNet blocks + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model0 = nn.Sequential(*model0) + self.model1_3 = nn.Sequential(*model1_3) + self.model1_5 = nn.Sequential(*model1_5) + self.model = nn.Sequential(*model) + print(list(self.modules())) + + def forward(self, input1, input2): + """Standard forward""" + f0 = self.model0(input1) + f1 = self.model1_3(f0) 
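+        # model1_3 (3x3 kernels) and model1_5 (5x5 kernels) are two parallel columns
+        # over the shared stem f0; their outputs are concatenated with the style map
+        # input2 before the merge convolution in self.model.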
+ f2 = self.model1_5(f0) + y1 = torch.cat([f1, f2, input2], 1) + return self.model(y1) + +class ResnetStyle2MC2Generator(nn.Module): + # multi-column, need to insert style early + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', extra_channel=3, model0_res=0, model1_res=0): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetStyle2MC2Generator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model0 = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + model1_3 = [] + model1_5 = [] + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + model1_3 += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + model1_5 += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=5, stride=2, padding=2, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2 ** n_downsampling + for i in range(model0_res): # add ResNet blocks + model1_3 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + model1_5 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, kernel=5)] + + model2_3 = [] + model2_5 = [] + model2_3 += [nn.Conv2d(ngf * mult + extra_channel, ngf * mult, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult), + nn.ReLU(True)] + model2_5 += [nn.Conv2d(ngf * mult + extra_channel, ngf * mult, kernel_size=5, stride=1, padding=2, bias=use_bias), + norm_layer(ngf * mult), + nn.ReLU(True)] + + for i in range(model1_res): # add ResNet blocks + model2_3 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + model2_5 += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, kernel=5)] + + model = [] + model += [nn.Conv2d(ngf * mult * 2, ngf * mult, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult), + nn.ReLU(True)] + for i in range(n_blocks-model0_res-model1_res): # add ResNet blocks + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model0 = nn.Sequential(*model0) + self.model1_3 = nn.Sequential(*model1_3) + self.model1_5 = 
nn.Sequential(*model1_5) + self.model2_3 = nn.Sequential(*model2_3) + self.model2_5 = nn.Sequential(*model2_5) + self.model = nn.Sequential(*model) + print(list(self.modules())) + + def forward(self, input1, input2): + """Standard forward""" + f0 = self.model0(input1) + f1 = self.model1_3(f0) + f2 = self.model1_5(f0) + f3 = self.model2_3(torch.cat([f1,input2],1)) + f4 = self.model2_5(torch.cat([f2,input2],1)) + #pdb.set_trace() + return self.model(torch.cat([f3,f4],1)) + +class ResnetBlock(nn.Module): + """Define a Resnet block""" + + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, kernel=3): + """Initialize the Resnet block + + A resnet block is a conv block with skip connections + We construct a conv block with build_conv_block function, + and implement skip connections in function. + Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf + """ + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, kernel) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, kernel=3): + """Construct a convolutional block. + + Parameters: + dim (int) -- the number of channels in the conv layer. + padding_type (str) -- the name of padding layer: reflect | replicate | zero + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. + use_bias (bool) -- if the conv layer uses bias or not + + Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) + """ + conv_block = [] + p = 0 + pad = int((kernel-1)/2) + if padding_type == 'reflect':#by default + conv_block += [nn.ReflectionPad2d(pad)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(pad)] + elif padding_type == 'zero': + p = pad + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=kernel, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(pad)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(pad)] + elif padding_type == 'zero': + p = pad + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=kernel, padding=p, bias=use_bias), norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + """Forward function (with skip connections)""" + out = x + self.conv_block(x) # add skip connections + return out + + +class UnetGenerator(nn.Module): + """Create a Unet-based generator""" + + def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet generator + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7, + image of size 128x128 will become of size 1x1 # at the bottleneck + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + + We construct the U-Net from the innermost layer to the outermost layer. + It is a recursive process. 
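+
+        A minimal shape check (illustrative; 128x128 with num_downs=7 matches the
+        example given for num_downs above):
+
+            >>> net = UnetGenerator(input_nc=3, output_nc=3, num_downs=7)
+            >>> tuple(net(torch.randn(1, 3, 128, 128)).shape)
+            (1, 3, 128, 128)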
+ """ + super(UnetGenerator, self).__init__() + # construct unet structure + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer + for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) + # gradually reduce the number of filters from ngf * 8 to ngf + unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class UnetSkipConnectionBlock(nn.Module): + """Defines the Unet submodule with skip connection. + X -------------------identity---------------------- + |-- downsampling -- |submodule| -- upsampling --| + """ + + def __init__(self, outer_nc, inner_nc, input_nc=None, + submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet submodule with skip connections. + + Parameters: + outer_nc (int) -- the number of filters in the outer conv layer + inner_nc (int) -- the number of filters in the inner conv layer + input_nc (int) -- the number of channels in input images/features + submodule (UnetSkipConnectionBlock) -- previously defined submodules + outermost (bool) -- if this module is the outermost module + innermost (bool) -- if this module is the innermost module + norm_layer -- normalization layer + user_dropout (bool) -- if use dropout layers. 
+ """ + super(UnetSkipConnectionBlock, self).__init__() + self.outermost = outermost + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + if input_nc is None: + input_nc = outer_nc + downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, + stride=2, padding=1, bias=use_bias) + downrelu = nn.LeakyReLU(0.2, True) + downnorm = norm_layer(inner_nc) + uprelu = nn.ReLU(True) + upnorm = norm_layer(outer_nc) + + if outermost: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1) + down = [downconv] + up = [uprelu, upconv, nn.Tanh()] + model = down + [submodule] + up + elif innermost: + upconv = nn.ConvTranspose2d(inner_nc, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv] + up = [uprelu, upconv, upnorm] + model = down + up + else: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv, downnorm] + up = [uprelu, upconv, upnorm] + + if use_dropout: + model = down + [submodule] + up + [nn.Dropout(0.5)] + else: + model = down + [submodule] + up + + self.model = nn.Sequential(*model) + + def forward(self, x): + if self.outermost: + return self.model(x) + else: # add skip connections + return torch.cat([x, self.model(x)], 1) + + +class NLayerDiscriminator(nn.Module): + """Defines a PatchGAN discriminator""" + + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d): + """Construct a PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + n_layers (int) -- the number of conv layers in the discriminator + norm_layer -- normalization layer + """ + super(NLayerDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func != nn.BatchNorm2d + else: + use_bias = norm_layer != nn.BatchNorm2d + + kw = 4 + padw = 1 + sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] + nf_mult = 1 + nf_mult_prev = 1 + for n in range(1, n_layers): # gradually increase the number of filters + nf_mult_prev = nf_mult + nf_mult = min(2 ** n, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + nf_mult_prev = nf_mult + nf_mult = min(2 ** n_layers, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map + self.model = nn.Sequential(*sequence) + + def forward(self, input): + """Standard forward.""" + return self.model(input) + + +class NLayerDiscriminatorCls(nn.Module): + """Defines a PatchGAN discriminator""" + + def __init__(self, input_nc, ndf=64, n_layers=3, n_class=3, norm_layer=nn.BatchNorm2d): + """Construct a PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + n_layers (int) -- the number of conv layers in the discriminator + norm_layer -- normalization layer + """ + super(NLayerDiscriminatorCls, self).__init__() + if 
type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func != nn.BatchNorm2d + else: + use_bias = norm_layer != nn.BatchNorm2d + + kw = 4 + padw = 1 + sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] + nf_mult = 1 + nf_mult_prev = 1 + for n in range(1, n_layers): # gradually increase the number of filters + nf_mult_prev = nf_mult + nf_mult = min(2 ** n, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + nf_mult_prev = nf_mult + nf_mult = min(2 ** n_layers, 8) + sequence1 = [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + sequence1 += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map + + sequence2 = [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + sequence2 += [ + nn.Conv2d(ndf * nf_mult, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + sequence2 += [ + nn.Conv2d(ndf * nf_mult, n_class, kernel_size=16, stride=1, padding=0, bias=use_bias)] + + + self.model0 = nn.Sequential(*sequence) + self.model1 = nn.Sequential(*sequence1) + self.model2 = nn.Sequential(*sequence2) + print(list(self.modules())) + + def forward(self, input): + """Standard forward.""" + feat = self.model0(input) + # patchGAN output (1 * 62 * 62) + patch = self.model1(feat) + # class output (3 * 1 * 1) + classl = self.model2(feat) + return patch, classl.view(classl.size(0), -1) + + +class PixelDiscriminator(nn.Module): + """Defines a 1x1 PatchGAN discriminator (pixelGAN)""" + + def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d): + """Construct a 1x1 PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + """ + super(PixelDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func != nn.InstanceNorm2d + else: + use_bias = norm_layer != nn.InstanceNorm2d + + self.net = [ + nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias), + norm_layer(ndf * 2), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)] + + self.net = nn.Sequential(*self.net) + + def forward(self, input): + """Standard forward.""" + return self.net(input) + + +class HED(nn.Module): + def __init__(self): + super(HED, self).__init__() + + self.moduleVggOne = nn.Sequential( + nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False), + nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False) + ) + + self.moduleVggTwo = nn.Sequential( + nn.MaxPool2d(kernel_size=2, stride=2), + nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False), + nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, 
stride=1, padding=1), + nn.ReLU(inplace=False) + ) + + self.moduleVggThr = nn.Sequential( + nn.MaxPool2d(kernel_size=2, stride=2), + nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False), + nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False), + nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False) + ) + + self.moduleVggFou = nn.Sequential( + nn.MaxPool2d(kernel_size=2, stride=2), + nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False), + nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False), + nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False) + ) + + self.moduleVggFiv = nn.Sequential( + nn.MaxPool2d(kernel_size=2, stride=2), + nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False), + nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False), + nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=False) + ) + + self.moduleScoreOne = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=1, stride=1, padding=0) + self.moduleScoreTwo = nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0) + self.moduleScoreThr = nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1, stride=1, padding=0) + self.moduleScoreFou = nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1, stride=1, padding=0) + self.moduleScoreFiv = nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1, stride=1, padding=0) + + self.moduleCombine = nn.Sequential( + nn.Conv2d(in_channels=5, out_channels=1, kernel_size=1, stride=1, padding=0), + nn.Sigmoid() + ) + + def forward(self, tensorInput): + tensorBlue = (tensorInput[:, 2:3, :, :] * 255.0) - 104.00698793 + tensorGreen = (tensorInput[:, 1:2, :, :] * 255.0) - 116.66876762 + tensorRed = (tensorInput[:, 0:1, :, :] * 255.0) - 122.67891434 + + tensorInput = torch.cat([ tensorBlue, tensorGreen, tensorRed ], 1) + + tensorVggOne = self.moduleVggOne(tensorInput) + tensorVggTwo = self.moduleVggTwo(tensorVggOne) + tensorVggThr = self.moduleVggThr(tensorVggTwo) + tensorVggFou = self.moduleVggFou(tensorVggThr) + tensorVggFiv = self.moduleVggFiv(tensorVggFou) + + tensorScoreOne = self.moduleScoreOne(tensorVggOne) + tensorScoreTwo = self.moduleScoreTwo(tensorVggTwo) + tensorScoreThr = self.moduleScoreThr(tensorVggThr) + tensorScoreFou = self.moduleScoreFou(tensorVggFou) + tensorScoreFiv = self.moduleScoreFiv(tensorVggFiv) + + tensorScoreOne = nn.functional.interpolate(input=tensorScoreOne, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) + tensorScoreTwo = nn.functional.interpolate(input=tensorScoreTwo, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) + tensorScoreThr = nn.functional.interpolate(input=tensorScoreThr, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) + tensorScoreFou = nn.functional.interpolate(input=tensorScoreFou, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) + tensorScoreFiv = nn.functional.interpolate(input=tensorScoreFiv, size=(tensorInput.size(2), tensorInput.size(3)), mode='bilinear', align_corners=False) + + return 
self.moduleCombine(torch.cat([ tensorScoreOne, tensorScoreTwo, tensorScoreThr, tensorScoreFou, tensorScoreFiv ], 1)) + +# class for VGG19 modle +# borrows largely from torchvision vgg +class VGG19(nn.Module): + def __init__(self, init_weights=None, feature_mode=False, batch_norm=False, num_classes=1000): + super(VGG19, self).__init__() + self.cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'] + self.init_weights = init_weights + self.feature_mode = feature_mode + self.batch_norm = batch_norm + self.num_clases = num_classes + self.features = self.make_layers(self.cfg, batch_norm) + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + # print('----------load the pretrained vgg net---------') + # if not init_weights == None: + # print('load the weights') + # self.load_state_dict(torch.load(init_weights)) + + + def make_layers(self, cfg, batch_norm=False): + layers = [] + in_channels = 3 + for v in cfg: + if v == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = v + return nn.Sequential(*layers) + + def forward(self, x): + if self.feature_mode: + module_list = list(self.features.modules()) + for l in module_list[1:27]: # conv4_4 + x = l(x) + if not self.feature_mode: + x = self.features(x) + x = x.view(x.size(0), -1) + x = self.classifier(x) + + return x + +class Classifier(nn.Module): + def __init__(self, input_nc, classes, ngf=64, num_downs=3, norm_layer=nn.BatchNorm2d, use_dropout=False, h=512, w=512, dim=4096): + super(Classifier, self).__init__() + self.input_nc = input_nc + self.ngf = ngf + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1, bias=use_bias), nn.LeakyReLU(0.2, True)] + nf_mult = 1 + nf_mult_prev = 1 + for n in range(1, num_downs): + nf_mult_prev = nf_mult + nf_mult = min(2 ** n, 8) + model += [ + nn.Conv2d(int(ngf * nf_mult_prev), int(ngf * nf_mult), kernel_size=4, stride=2, padding=1, bias=use_bias), + norm_layer(int(ngf * nf_mult)), + nn.LeakyReLU(0.2, True) + ] + nf_mult_prev = nf_mult + nf_mult = min(2 ** num_downs, 8) + model += [ + nn.Conv2d(ngf * nf_mult_prev, ngf * nf_mult, kernel_size=4, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + self.encoder = nn.Sequential(*model) + + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, dim), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(dim, dim), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(dim, classes), + ) + + def forward(self, x): + ax = self.encoder(x) + #print('ax',ax.shape) # (8, 512, 7, 7) + ax = ax.view(ax.size(0), -1) # view -- reshape + return self.classifier(ax) diff --git a/robot_painting/qmupd_vs/models/networks_basic.py b/robot_painting/qmupd_vs/models/networks_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..d71d6b383b9763bce2c1c19ae703966d87ba8cdf --- /dev/null +++ b/robot_painting/qmupd_vs/models/networks_basic.py @@ -0,0 +1,187 @@ + +from __future__ import absolute_import + +import sys +import torch +import torch.nn as nn +import 
torch.nn.init as init +from torch.autograd import Variable +import numpy as np +from pdb import set_trace as st +from skimage import color +from IPython import embed +from . import pretrained_networks as pn + +from util import util + +def spatial_average(in_tens, keepdim=True): + return in_tens.mean([2,3],keepdim=keepdim) + +def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W + in_H = in_tens.shape[2] + scale_factor = 1.*out_H/in_H + + return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) + +# Learned perceptual metric +class PNetLin(nn.Module): + def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, version='0.1', lpips=True): + super(PNetLin, self).__init__() + + self.pnet_type = pnet_type + self.pnet_tune = pnet_tune + self.pnet_rand = pnet_rand + self.spatial = spatial + self.lpips = lpips + self.version = version + self.scaling_layer = ScalingLayer() + + if(self.pnet_type in ['vgg','vgg16']): + net_type = pn.vgg16 + self.chns = [64,128,256,512,512] + elif(self.pnet_type=='alex'): + net_type = pn.alexnet + self.chns = [64,192,384,256,256] + elif(self.pnet_type=='squeeze'): + net_type = pn.squeezenet + self.chns = [64,128,256,384,384,512,512] + self.L = len(self.chns) + + self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune) + + if(lpips): + self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) + self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) + self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) + self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) + self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) + self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4] + if(self.pnet_type=='squeeze'): # 7 layers for squeezenet + self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout) + self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout) + self.lins+=[self.lin5,self.lin6] + + def forward(self, in0, in1, retPerLayer=False): + # v0.0 - original release had a bug, where input was not scaled + in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1) + outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input) + feats0, feats1, diffs = {}, {}, {} + + for kk in range(self.L): + feats0[kk], feats1[kk] = util.normalize_tensor(outs0[kk]), util.normalize_tensor(outs1[kk]) + diffs[kk] = (feats0[kk]-feats1[kk])**2 + + if(self.lpips): + if(self.spatial): + res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)] + else: + res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)] + else: + if(self.spatial): + res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)] + else: + res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)] + + val = res[0] + for l in range(1,self.L): + val += res[l] + + if(retPerLayer): + return (val, res) + else: + return val + +class ScalingLayer(nn.Module): + def __init__(self): + super(ScalingLayer, self).__init__() + self.register_buffer('shift', torch.Tensor([-.030,-.088,-.188])[None,:,None,None]) + self.register_buffer('scale', torch.Tensor([.458,.448,.450])[None,:,None,None]) + + def forward(self, inp): + return (inp - self.shift.to(inp.device)) / self.scale.to(inp.device) + + +class NetLinLayer(nn.Module): + ''' A single linear layer which does 
a 1x1 conv ''' + def __init__(self, chn_in, chn_out=1, use_dropout=False): + super(NetLinLayer, self).__init__() + + layers = [nn.Dropout(),] if(use_dropout) else [] + layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),] + self.model = nn.Sequential(*layers) + + +class Dist2LogitLayer(nn.Module): + ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) ''' + def __init__(self, chn_mid=32, use_sigmoid=True): + super(Dist2LogitLayer, self).__init__() + + layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),] + layers += [nn.LeakyReLU(0.2,True),] + layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),] + layers += [nn.LeakyReLU(0.2,True),] + layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),] + if(use_sigmoid): + layers += [nn.Sigmoid(),] + self.model = nn.Sequential(*layers) + + def forward(self,d0,d1,eps=0.1): + return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1)) + +class BCERankingLoss(nn.Module): + def __init__(self, chn_mid=32): + super(BCERankingLoss, self).__init__() + self.net = Dist2LogitLayer(chn_mid=chn_mid) + # self.parameters = list(self.net.parameters()) + self.loss = torch.nn.BCELoss() + + def forward(self, d0, d1, judge): + per = (judge+1.)/2. + self.logit = self.net.forward(d0,d1) + return self.loss(self.logit, per) + +# L2, DSSIM metrics +class FakeNet(nn.Module): + def __init__(self, use_gpu=True, colorspace='Lab'): + super(FakeNet, self).__init__() + self.use_gpu = use_gpu + self.colorspace=colorspace + +class L2(FakeNet): + + def forward(self, in0, in1, retPerLayer=None): + assert(in0.size()[0]==1) # currently only supports batchSize 1 + + if(self.colorspace=='RGB'): + (N,C,X,Y) = in0.size() + value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N) + return value + elif(self.colorspace=='Lab'): + value = util.l2(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)), + util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float') + ret_var = Variable( torch.Tensor((value,) ) ) + if(self.use_gpu): + ret_var = ret_var.cuda() + return ret_var + +class DSSIM(FakeNet): + + def forward(self, in0, in1, retPerLayer=None): + assert(in0.size()[0]==1) # currently only supports batchSize 1 + + if(self.colorspace=='RGB'): + value = util.dssim(1.*util.tensor2im(in0.data), 1.*util.tensor2im(in1.data), range=255.).astype('float') + elif(self.colorspace=='Lab'): + value = util.dssim(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)), + util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float') + ret_var = Variable( torch.Tensor((value,) ) ) + if(self.use_gpu): + ret_var = ret_var.cuda() + return ret_var + +def print_network(net): + num_params = 0 + for param in net.parameters(): + num_params += param.numel() + print('Network',net) + print('Total number of parameters: %d' % num_params) diff --git a/robot_painting/qmupd_vs/models/pretrained_networks.py b/robot_painting/qmupd_vs/models/pretrained_networks.py new file mode 100644 index 0000000000000000000000000000000000000000..b1329d64b798229bb16578f5bcaa1dff7d660a8e --- /dev/null +++ b/robot_painting/qmupd_vs/models/pretrained_networks.py @@ -0,0 +1,181 @@ +from collections import namedtuple +import torch +from torchvision import models +from IPython import embed + +class squeezenet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): 
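+        # Wrap torchvision's pretrained feature stack as sequential slices so that
+        # forward() can return one intermediate activation per slice; these activations
+        # are the feature taps used by the perceptual metric in networks_basic.py.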
+ super(squeezenet, self).__init__() + pretrained_features = models.squeezenet1_1(pretrained=pretrained).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.slice6 = torch.nn.Sequential() + self.slice7 = torch.nn.Sequential() + self.N_slices = 7 + for x in range(2): + self.slice1.add_module(str(x), pretrained_features[x]) + for x in range(2,5): + self.slice2.add_module(str(x), pretrained_features[x]) + for x in range(5, 8): + self.slice3.add_module(str(x), pretrained_features[x]) + for x in range(8, 10): + self.slice4.add_module(str(x), pretrained_features[x]) + for x in range(10, 11): + self.slice5.add_module(str(x), pretrained_features[x]) + for x in range(11, 12): + self.slice6.add_module(str(x), pretrained_features[x]) + for x in range(12, 13): + self.slice7.add_module(str(x), pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1 = h + h = self.slice2(h) + h_relu2 = h + h = self.slice3(h) + h_relu3 = h + h = self.slice4(h) + h_relu4 = h + h = self.slice5(h) + h_relu5 = h + h = self.slice6(h) + h_relu6 = h + h = self.slice7(h) + h_relu7 = h + vgg_outputs = namedtuple("SqueezeOutputs", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7']) + out = vgg_outputs(h_relu1,h_relu2,h_relu3,h_relu4,h_relu5,h_relu6,h_relu7) + + return out + + +class alexnet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(alexnet, self).__init__() + alexnet_pretrained_features = models.alexnet(pretrained=pretrained).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.N_slices = 5 + for x in range(2): + self.slice1.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(2, 5): + self.slice2.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(5, 8): + self.slice3.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(8, 10): + self.slice4.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(10, 12): + self.slice5.add_module(str(x), alexnet_pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1 = h + h = self.slice2(h) + h_relu2 = h + h = self.slice3(h) + h_relu3 = h + h = self.slice4(h) + h_relu4 = h + h = self.slice5(h) + h_relu5 = h + alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5']) + out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5) + + return out + +class vgg16(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(vgg16, self).__init__() + vgg_pretrained_features = models.vgg16(pretrained=pretrained).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.N_slices = 5 + for x in range(4): + self.slice1.add_module(str(x), vgg_pretrained_features[x]) + for x in range(4, 9): + self.slice2.add_module(str(x), vgg_pretrained_features[x]) + for x in range(9, 16): + self.slice3.add_module(str(x), vgg_pretrained_features[x]) + for x in range(16, 23): + 
self.slice4.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(23, 30):
+            self.slice5.add_module(str(x), vgg_pretrained_features[x])
+        if not requires_grad:
+            for param in self.parameters():
+                param.requires_grad = False
+
+    def forward(self, X):
+        h = self.slice1(X)
+        h_relu1_2 = h
+        h = self.slice2(h)
+        h_relu2_2 = h
+        h = self.slice3(h)
+        h_relu3_3 = h
+        h = self.slice4(h)
+        h_relu4_3 = h
+        h = self.slice5(h)
+        h_relu5_3 = h
+        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
+        out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
+
+        return out
+
+
+class resnet(torch.nn.Module):
+    def __init__(self, requires_grad=False, pretrained=True, num=18):
+        super(resnet, self).__init__()
+        if(num==18):
+            self.net = models.resnet18(pretrained=pretrained)
+        elif(num==34):
+            self.net = models.resnet34(pretrained=pretrained)
+        elif(num==50):
+            self.net = models.resnet50(pretrained=pretrained)
+        elif(num==101):
+            self.net = models.resnet101(pretrained=pretrained)
+        elif(num==152):
+            self.net = models.resnet152(pretrained=pretrained)
+        self.N_slices = 5
+
+        self.conv1 = self.net.conv1
+        self.bn1 = self.net.bn1
+        self.relu = self.net.relu
+        self.maxpool = self.net.maxpool
+        self.layer1 = self.net.layer1
+        self.layer2 = self.net.layer2
+        self.layer3 = self.net.layer3
+        self.layer4 = self.net.layer4
+
+    def forward(self, X):
+        h = self.conv1(X)
+        h = self.bn1(h)
+        h = self.relu(h)
+        h_relu1 = h
+        h = self.maxpool(h)
+        h = self.layer1(h)
+        h_conv2 = h
+        h = self.layer2(h)
+        h_conv3 = h
+        h = self.layer3(h)
+        h_conv4 = h
+        h = self.layer4(h)
+        h_conv5 = h
+
+        outputs = namedtuple("Outputs", ['relu1','conv2','conv3','conv4','conv5'])
+        out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
+
+        return out
diff --git a/robot_painting/qmupd_vs/models/test_model.py b/robot_painting/qmupd_vs/models/test_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..b86872218cbf60e61e76989649799adb993de3bf
--- /dev/null
+++ b/robot_painting/qmupd_vs/models/test_model.py
@@ -0,0 +1,96 @@
+from .base_model import BaseModel
+from . import networks
+import torch
+import pdb
+
+class TestModel(BaseModel):
+    """ This TestModel can be used to generate CycleGAN results for only one direction.
+    This model will automatically set '--dataset_mode single', which only loads the images from one collection.
+
+    See the test instruction for more details.
+    """
+    @staticmethod
+    def modify_commandline_options(parser, is_train=True):
+        """Add new dataset-specific options, and rewrite default values for existing options.
+
+        Parameters:
+            parser          -- original option parser
+            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+
+        The model can only be used during test time. It requires '--dataset_mode single'.
+        You need to specify the network using the option '--model_suffix'.
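+
+        Illustrative invocation (the script name test.py and the exact flag set are
+        assumptions based on the option help strings, not fixed by this class):
+
+            python test.py --model test --model_suffix _A --no_dropout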
+ """ + assert not is_train, 'TestModel cannot be used during training time' + parser.set_defaults(dataset_mode='single') + parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.') + parser.add_argument('--style_control', type=int, default=0, help='use style_control') + parser.add_argument('--sfeature_mode', type=str, default='vgg19_softmax', help='vgg19 softmax as feature') + parser.add_argument('--sinput', type=str, default='sind', help='use which one for style input') + parser.add_argument('--sind', type=int, default=0, help='one hot for sfeature') + parser.add_argument('--svec', type=str, default='1,0,0', help='3-dim vec') + parser.add_argument('--simg', type=str, default='Yann_Legendre-053', help='drawing example for style') + parser.add_argument('--netga', type=str, default='resnet_style_9blocks', help='net arch for netG_A') + parser.add_argument('--model0_res', type=int, default=0, help='number of resblocks in model0') + parser.add_argument('--model1_res', type=int, default=0, help='number of resblocks in model1 (after insert style, before 2 column merge)') + + return parser + + def __init__(self, opt): + """Initialize the pix2pix class. + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + assert(not opt.isTrain) + BaseModel.__init__(self, opt) + # specify the training losses you want to print out. The training/test scripts will call + self.loss_names = [] + # specify the images you want to save/display. The training/test scripts will call + #self.visual_names = ['real', 'fake', 'rec', 'fake_B'] + self.visual_names = ['real', 'fake'] + # specify the models you want to save to the disk. The training/test scripts will call and + self.model_names = ['G' + opt.model_suffix, 'G_B'] # only generator is needed. + if not self.opt.style_control: + self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, + opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) + else: + print(opt.netga) + print('model0_res', opt.model0_res) + print('model1_res', opt.model1_res) + self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netga, opt.norm, + not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, opt.model0_res, opt.model1_res) + + self.netGB = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, + opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) + # assigns the model to self.netG_[suffix] so that it can be loaded + # please see + setattr(self, 'netG' + opt.model_suffix, self.netG) # store netG in self. + setattr(self, 'netG_B', self.netGB) # store netGB in self. + + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + + Parameters: + input: a dictionary that contains the data itself and its metadata information. + + We need to use 'single_dataset' dataset mode. It only load images from one domain. 
+ """ + self.real = input['A'].to(self.device) + self.image_paths = input['A_paths'] + if self.opt.style_control: + self.style = input['B_style'] + + def forward(self): + """Run forward pass.""" + if not self.opt.style_control: + self.fake = self.netG(self.real) # G(real) + else: + #print(torch.mean(self.style,(2,3)),'style_control') + self.fake = self.netG(self.real, self.style) + + def optimize_parameters(self): + """No optimization for test model.""" + pass diff --git a/robot_painting/qmupd_vs/operator_main.ipynb b/robot_painting/qmupd_vs/operator_main.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..b1fe6961d9f3e756531a67f649983428b10bff46 --- /dev/null +++ b/robot_painting/qmupd_vs/operator_main.ipynb @@ -0,0 +1,606 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "138ed57c06ca45c786e09bcf744f4d54", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "CameraStream(constraints={'facing_mode': 'user', 'audio': False, 'video': {'width': 512, 'height': 512, 'facin…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d0e4ef53014b4bbab34e6ba90336ad52", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "ImageRecorder(image=Image(value=b''), stream=CameraStream(constraints={'facing_mode': 'user', 'audio': False, …" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from ipywebrtc import CameraStream, ImageRecorder\n", + "from IPython.display import display\n", + "import PIL.Image\n", + "import io\n", + "import numpy\n", + "import cv2\n", + "from ipywebrtc import CameraStream\n", + "camera = CameraStream.facing_user(audio=False, constraints={\n", + " 'facing_mode': 'user',\n", + " 'audio': False,\n", + " 'video': { 'width': 512, 'height': 512 }\n", + "})\n", + "display(camera)\n", + "recorder = ImageRecorder(stream=camera)\n", + "display(recorder)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAicAAADCCAYAAACSRmLFAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAADBJklEQVR4nOydd3hUZfq/7+kzyWTSe28EQiCU0EITkCoWEHQtYFt3ddW1Ytvvquuuva5rXXVxERVlsaKAVOkt1JCQhIT0nkmbTJ85vz9yzfkxJoEEqXru6+LSmXPmnDOT97zneZ/yeWSCIAhISEhISEhISFwgyM/3BUhISEhISEhInIhknEhISEhISEhcUEjGiYSEhISEhMQFhWScSEhISEhISFxQSMaJhISEhISExAWFZJxISEhISEhIXFBIxomEhISEhITEBYVknEhISEhISEhcUEjGiYSEhISEhMQFhWScSEhISEhISFxQnDfj5K233iIhIQGtVsuoUaPYvXv3+boUCYk+IY1diYsVaexKXCycF+Pk888/54EHHuDJJ59k3759ZGZmMn36dOrr68/H5UhI9Bpp7EpcrEhjV+JiQnY+Gv+NGjWKESNG8OabbwLgdruJjY3lnnvu4dFHHz3l591uN9XV1fj5+SGTyc725Ur8ShEEgfb2dqKiopDLe2enS2NX4kJAGrsSFyu9HbvKc3hNANjtdnJycnjsscfE9+RyOZdeeik7duzo9jM2mw2bzSa+rqqqIj09/axfq8Rvg4qKCmJiYk65nzR2JS40pLErcbFyqrF7zo2TxsZGXC4X4eHhXu+Hh4dz9OjRbj/z3HPP8be//a3L+xUVFRgMhrNynRK/ftra2oiNjcXPz69X+0tjV+JCQRq7Ehcr33zzDQsXLjzl2D3nxsnp8Nhjj/HAAw+Irz03psFgkG4SiV/M2XRRS2NX4mwijV2Jiw0fHx/g1GP3nBsnISEhKBQK6urqvN6vq6sjIiKi289oNBo0Gs25uDwJiR6Rxq7ExYo0diUuNs55tY5arWb48OGsX79efM/tdrN+/XrGjBlzri9HQqLXSGNX4mJFGrsSFxvnJazzwAMPcNNNN5GVlcXIkSN5/fXX6ejo4JZbbjkflyMh0WuksStxsSKNXYmLifNinFx77bU0NDTwxBNPUFtby5AhQ1i9enWXZC0JiQsNaexKXKxIY1fiYuK86Jz8Utra2vD396e1tVVKzJI4bc7HOJLGrsSZQBq7EhcrK1asYN68eaccR1JvHQkJCQkJCYkLCsk4kZCQkJCQkLigkIwTCQkJCQkJiQuKi0KETeLc4HA4OHbsGIWFhQQGBpKenk5wcLDUR0NCQkLiLCAIAnV1deTm5lJVVYVerycpKYm0tDRRrOy3imSc/MawWq0oFApUKhWCIGCxWMjPz2fTpk389NNPbNq0ifb2dtRqNWFhYVx33XXMmzePiooKhg0bRmJi4vn+ChISEhIXDVarlerqarRaLQEBATgcDmpraykpKaGoqAiZTEZ8fDzJycmYTCYOHDjAtm3biI2NpbW1lbS0NEaMGNHrBo+/FiTj5FeKIAg4HA6MRiM1NTWUlpaye/duVq9eTXBwMMnJybS3t7N//36qqqpob28HIDQ0lAkTJqBQKMjJyeHll1/m9ddfx8fHh4iICP7v//6Pfv36kZGR8Zu37CUkJCQ8CIKA0WjEbDZTVVWF0Wikvr6e5uZmWltbvRaFdrud/v37ExUVRUZGBocPH+bbb7/F6XSSlZVFamoqLS0tVFZW0tLSQm5uLmFhYQwcOJCEhITfhKEiGSe/IhwOBwUFBezcuZOVK1fS0NBAQUEBFosFs9nste+JSpHh4eFERkYybdo0XC6X2AgsNDSUqVOn8tlnnxEbG0t+fj4LFixAq9UyZMgQ7rvvPi677DL0ev05/Z4SEhISFwp1dXUcPHiQsrIympqa0Ol0hIWF4ePjQ0xMDK2treh0OqxWK1arlbi4OKxWK3V1dbS2trJmzRrkcjk+Pj4IgkBxcTE7d+4kOzsbq9WK2+0WDZUDBw4QHx/P2LFjSUxM/FUbKZJx8ivAZDKRk5PD66+/ztq1azGbzQiCgEqlIiQkhMGDB6NQKNi0aRNOp9Prs2PHjmXo0KGsWrWKlStXUlFRgcvlErdXVVXhdDrJzc0V37NarezcuZMbb7yRrKws7r//fubMmYNKpTpn31lCQkLifFJWVsa+ffvIz8/H5XKh0+nQaDQMHjyYsrIyamtraWtrQ61WExoaiiAI+Pr6UltbS2NjI/X19cTGxhIaGopGo0EmkyGXywkLC8NkMlFVVUVkZCTt7e3Y7Xbkcjl+fn7U1dWxZMkSUlNTueKKK361mjOScXIR43A42LFjB08//TRbt27FZrMBoFAoGDduHA888ABpaWkcOnSITz75xMvoAIiKimL06NG89dZbWK3Wbs+RmJhIYGAghYWFXbYplUoSExO59dZbycvL4/7778ff3//Mf1EJCQmJC4S6ujp27NhBbm4uZrOZIUOGkJiYyJ49ezAajZSXlwOd87NnTtbr9YSEhLBt2zYUCgVtbW1ER0cDnQ0WPYvJ8PBwGhoakMvlNDU1YbFYiIiIwGKxoFAosNls2Gw2HA4HarWaFStWMGnSJBISEs7Xz3HWkIyTixCn08n27dt57bXX+PHHHzGbzajVapRKJYGBgTz66KNcf/31fP755zz88MMcO3asi2Gi1+uRyWT861//wm6393iu0tJSWltbu91ms9nQaDT84Q9/4IUXXiAnJ4dnn32WjIwMqcJHQkLiV0VLSwubNm0iLy8Ph8OB2+0WwzAFBQWEhYVxySWXcPjwYerr64HOhos6nY7AwEB27tyJWq3G399f9GDLZDJUKhUKhQKXy0VNTQ0ulwsfHx/8/PxobW2lurqaoKAgGhoaEAQBmUyGWq2mtLSUpKQk1q9fT3x8POPGjUOr1Z7Pn+iMIhknFxnNzc28+OKL/POf/xSt6WnTprFo0SI0Gg2RkZGEhoZyzz33sHTpUnrqTmCxWOjo6Ohxu4f6+voeE18FQWDJkiXMnTuXv/71r7z66qtMnDiRhx56iHvuuQc/P79f/H0lJCQkzjfFxcWsWrWKjo4OwsPDaWpqwul0iguxsLAwoqOj+fbbb2lpaUGhUOB2u8VQTUFBAS6XC7fbjVwuF/9FRUWhVqtpaWnB4XDQ1tYGdHq/9Xo9ra2tuN1umpqagE5vtWeh2dzczO7duxk/fjwmk4mPP/6YyZMn/2pyUSTj5CKivr6em266iTVr1iAIAqGhoTzyyCP88Y9/FJNSbTYbf/zjH09qmABdPCknw+12n3Tb//73P8rLy3nqqaf48MMP+cc//sG3337Lfffdx+zZs6WEWQkJiYuWwsJCvv76a4KCgsjIyGD//v0EBAQwffp04uPjgc5QzyeffILdbkelUiGXy9FoNCgUCpTKzsesp4zY6XQiCAIKhQKZTEZxcTHQ6c1OT0+nuLgYm81GVVWVeA0ej0lKSgrNzc3U19ej0WgA2LNnDyEhIURGRrJq1Sr8/f255J
JLiI2NPce/1Jnl4jevfiPU19ezcOFCVq9ejUKhIC0tjU8++YQHH3zQ6+G/du1aPvvsMwRBwMfHRxzAP0ej0TBv3rxelQP3lI9yIrt37+bNN99k5syZPPbYY9TW1nLjjTdyzTXXcPDgwVN6aCQkJCQuNAoLC/nmm28IDAyksbGRzZs3k5qaysKFC0XDRBAENm3ahMvlYujQoTidTux2OyaTiba2NpqammhqaqKlpUV8z1NOXFFRgSAICIKAyWSiqKiIgIAAxowZg4+PD4GBgaIRIwgCRUVFBAcHExwcjFKpFMPnx44dY/PmzURGRpKQkMCyZcvYsGEDJpPpfP58vwjJOLkI8Bgma9asISUlheXLl7N9+3amTp3qtZ/b7ebTTz/FbreTkJDAnXfe2cX48LgS//Of//Dhhx8SFxcnbuvXrx9Tpkw57essKCjg+eefZ8mSJTzyyCMMHz6cVatWMX36dB555BFKSkokI0VCQuKi4ETDpLm5mbi4OObMmcPMmTO9cjtaWlqoqqoiJiaGPXv2UFtbi9vtFj0ogBjeSUlJYdq0aQiCgEajQa1W43a7sdvtCIKA2+2mtraWgwcPYrPZaG1t9fJyu1wu8vLy8PPz45JLLkEul6NWqxk2bBhZWVkcOnSI7du3k5iYiMlk4qOPPmLr1q1dpCQuBiTj5AKnqampi2Fy1VVXERQU1GXf0tJS1q1bx5w5c1i9ejXFxcU0NzcDnTfH1Vdfzbp169iwYQPXX389Go1GLENTq9W8+OKL/PGPfxT395S3RUREkJaWJt5onsTb7rj88svRarU8++yzzJs3j9tvvx2Hw8FLL71EdnY2jzzyCMePHz8bP5WEhITEGaGoqIivv/6awMBAjEYjY8aM4brrrmPQoEEoFAqvfXNzc/H19cVsNjN48GAiIyNRqVTExcWJya1paWlMmDCB6667jpCQENFQcTqdDBgwgMTERHQ6HQEBAchkMjo6OnA6nbhcLtFrIpPJxH8eUU2ZTIbL5SI3NxdBEBgwYABms1l8PX78ePLy8njvvffYvXv3RbU4lIyTCxi3280rr7zCjz/+KBomQ4YM6XH/oKAg/vOf/7BkyRIANm7cCHR6S66++mo++OADJk2aRFpaGtAZ2pk5cyYAqampTJ48mfz8fAAmTpzIunXrePvtt1m+fDm7du3ikUceQS6XM3DgQL7++muSkpK6VOXs3LlTzIF58skncTqdvPTSS/z+97+nra2Nl156iRkzZnDgwIEz/GtJSEhI/HLa29tZs2YNdrudhoYGxowZw7hx43qsQIyIiCAsLIxp06aJVZAxMTEUFxfj6+vLNddcw7XXXsvEiRNRKBRERUUREBBAcXExbrebgQMHYrVaGTt2LCaTiYEDBxIVFcWQIUOYN28ewcHBREVFodPpSE5ORi6XI5PJaG1tpX///gwaNAg/Pz+OHz+O3W4nIyMDt9vNxo0bxQRdpVJJTk4O27Ztu2gMFMk4uYDZvHkzb7/9NsnJyac0TKAz4cqTgPrZZ5+Jg/df//oXH374IQEBAV3KhrOyslAoFNxxxx0olUq+++47AK655hrGjRvHHXfcwbhx4/D39+f+++8nKiqKtLQ0xo0bx9q1a1m8eDGRkZHijVtfX8/TTz/N+PHjcbvdLF68mHvuuYfY2FiuvPJKoLNS6MYbb5Q8KBISEhccGzduJCUlhcjISMaNG3dSwwQ6F3Zz5swhPDyc3NxcAgICqKysJDIykmuuuYaYmBiqq6tFo0CtVhMUFERUVBSRkZFotVqcTicqlQqDwcDVV1/NbbfdxpVXXklGRgbZ2dnY7XaUSiUzZszg0ksvJTY2FoVCQV5eHrW1tQwdOhSNRsPhw4cpKysjIiJC9OC0tLQA0NbWxu7du9m/f/+5+Bl/MZJxcoHS3NzMokWLMBgMfPHFFz0aJm1tbd1W0yiVSm6++WZ+/PFH/vSnP2EwGGhsbOSZZ57x2i8jI4OwsDCmTJkiCgvJZDJ8fX27HFOn0+Hj48OVV16JXC4nKSmJm266iZ07d/Lcc88RGRkp1uOvXLmSAQMGAGA2m3nllVcYOnQoer2e/v37c9ttt4ky+RISEhIXAgUFBdTX19PW1kZqamq3hoknMfXnyaaCIBAeHo4gCFx22WXceuutxMXFkZ+fz44dO7z0ohITE0lOTiYlJYXDhw9jsVj46aefUKlUyGQyMQkWOufd0NBQwsLCCA4OZsyYMSxYsICpU6cSGhpKfX09hw4dIjw8HIPBQGtrK1arFYPBgN1ux2g0EhISgsvlQi6XU1tbe1F4TyTj5ALE7Xbz0ksvcfToUZ555hmGDh3a7X55eXncd9993VbTLFq0iP/85z9e5WTV1dWsW7cOi8UivqfVasUbYd++fVitVkJCQhg7dmyXYzY1NdHa2kp4eLjX+3FxcTz88MPs3r2bd955B4VCQU1NjVfopq2tjU8++YSMjAw2bNjAnj17GDlyZF9/GgkJCYmzgslkYuPGjQQHByMIAqNHj+5imDgcDvbu3Ut5eTl1dXVe2zQaDWPHjuXmm29m2LBhYglxQ0MD/fr1w2g0ivuq1Wri4uKQyWRUVVWhVCqx2Wz079+/y3V5lGL9/PzE61EqlYwZM4ZbbrmFmTNnEhAQQHNzM+np6SiVStrb20WhuMDAQMrLy0U9lMTExItCJFMyTi4w3G43//73v/nnP//JrFmzuO6667rd74cffuCyyy5j4sSJ3ZYDe5JZT0QQBKqqqrwyt+12exfPyzXXXENSUlKXYzY0NGA0Glm+fDkOh8NrmyfOmp2dTUBAQLfXfPjwYWpra3G5XCxbtozbbrvN64aVkJCQOB9YLBZWrlxJeno65eXlTJkyBbVa7bWP2+3m+++/Z+PGjXR0dHQrGR8TE9NtCw9/f38xvAKd825VVZXowRg9ejQajYbhw4d3+WxTUxMajYaKigqxwMHzOa1Wy6hRoxg2bBgdHR1UVlaKxRLNzc0oFAp0Oh1qtZqKigr8/Pz44YcfKCkpOa3f6VwiGScXGHv27OHxxx9Hp9Px0EMPidb3iRQUFLBo0SIGDhzYo/HSHfv37yckJASdTie+d+jQIVGG3nPzDBo06KSW9ZIlS9i5c2e3hkpERASpqalA5+rglltuEbPbZTIZlZWVpKenExERwTfffMOtt94qGSgSEhLnDUEQ+Omnn/D396ejo4P+/fsTEhLSZZ+9e/eKyaUjRozo9fEbGhpYt26dl9FSWVnJTz/9hNvtxu1243A4CAgI6FGXymQyYTab+emnnzh+/DiffPKJl7Hj8ZB7jCDPAtRgMCCXyxkyZAiVlZW0t7cTEBDAV199dcEbKJJxcgFRV1fHQw89RHNzM7///e/Jysrqso/JZOKuu+6irq6Op59+GqvVyoYNGwC84ohut1tsOgWd9fHr168nKirKy9PicrmIjY0lMjJSLE1LSUnp9vqsViuCIIhdNpcsWcLChQvZtm2buI9MJhMNKo1Gw7333itWB4WEhDB+/Hhqa2uZO3cuMpmMb7/9lqeffrpLt2QJCQmJc0FhYaEY0i4uL
mb8+PFd9vF0IPb19WXw4MG4XC5++OEHHA4Hra2t4txrMpnEvjoARqORqqoqWlpavDzK/v7+TJ06FR8fH5xOJ0qlEoPB0KVMGTp7qUVGRhIYGIivry/ff/+9OJ/D/1ePhc5wj9vtJjExUTyPw+Fg8+bNBAUFUVFRgd1uJywsjG+++UaUy78QkYyTCwSz2cwtt9zC1q1bSU5O5o477ujWe/Hxxx+Tk5PD66+/TmZmJs888wybNm2ioaGBxx9/XKzGKS8v5y9/+Yso4FNeXs7q1au7CLdBZ3KW1WqluroavV7fJafEw7p163A6naSkpJCUlITZbMZqtfLNN98gCAIrV65kw4YNXoaGXq8XlRRbW1sJCAjAaDQSERGBr68vgiDw/vvvs3PnToCTNiGUkJCQOJOUl5fz9ddfYzAYyM3NZeTIkV3C5E6nk7Vr1+Lj44NcLmfo0KGUlpbi4+NDQUEBW7ZsobKyEoAjR45w4MABsRfOkSNH6OjoQK/Xe3lOZDKZqGPi5+dHS0sLwcHBXa7PoyIrl8sxmUwkJSXhdDpJT0+noqKClpYWvv/+e1avXo3b7RaNm6CgIDGkExgYKBpHHR0dNDQ0MHjwYGw2Gxs3bkQQBCorKy+4BaJknFwAOBwOnn32WX788Uf8/Px49tlnu41nNjc3s2TJEv71r39xww034HQ6+fHHHxk+fDgvvPAC69atw2w2YzQayc/P5+OPPxb7M3z22WfI5XKuuOKKLsdNTk7GYrFQU1ODn5+f2Mr7RARB4NChQwCMHDkSlUolbtu5cycOh4OlS5cyd+5cr1I1hULBgw8+SHBwMDabja+//lrc5jG+zGYzTzzxBCaTiXvuuYctW7ac1u8oISEh0Vuam5v54Ycf8PX1xWKx0K9fP4YNG9Zlv2PHjhEQEIBWq2X27Nm0trYSGhpKU1MTW7Zsobq6mrKyMvLz88X/5ufn43A4OHjwIGq1WkxUPZG2tjZcLhcGg4Hm5uYuoSTo1FxxOp34+Pggk8lEAyYkJARBECgrK+PYsWP4+fmhUChEAyM0NJTMzEwqKysZN24cycnJNDQ0AODr68uuXbvw9/cnPz+fnJwcioqKyMnJOdM/8S9CMk4uAL7++mtefvllZDIZL7/8MvPnz+92P5fLxZNPPskNN9wgPtgDAwPp168fa9asISsriz179jBlyhR27txJW1sbzc3N2O12vv/+e/r160dUVFSXYwqCwPbt22lra2PAgAE9dhNOSUnBz8+PhQsXIpPJMBgM4goAEEWETgwnAUyZMoXf/e53gHfoKTQ0lLi4OPr378/OnTvF8NTixYsvilI3CQmJixOHw8GaNWtoaWmho6OD0NBQrrzyym5z/JxOJ+Hh4Vx55ZVoNBoEQaC1tRW73U5dXR0tLS2UlpayZcsWjh49itVqpa2tjZqaGmw2G3K5vNvFptvtpri4mNjYWJqbm4mIiOiyj1qtRq/XU1tbS3JyMgaDAY1Gg9VqRS6X4+fnh1qtpr6+nvj4eLG8WS6XM2nSJARBYOPGjQQEBNDS0oJer8fHxwebzYbZbGbEiBEcPHiQyspKDh48eEGFeSTj5DxTWlrKfffdh8Ph4Pbbb2fBggU9JqOGhIQwY8YMr+1DhgwRk6GKiopEKeN//OMfOJ1OysvLqaqqorCwkNmzZ3t5PAC2bt2KSqXik08+wel00q9fv25vUJlMxlNPPcUPP/wglgCPHDkSpVJJY2MjbW1tzJgxg1GjRnn1nfDw5JNPimq0AAcPHmTw4MHU1tYyaNAgrFYrP/74I+PHj+enn36SkmQlJCTOGnv27CEvLw+lUolOp2PWrFndzlvQqQU1fvx4MVlVJpNRVlZGfHw8vr6+qNVqGhoaUKlUqNVqlEolRqORsrIy3G43Wq3WK1RutVrRarUYjUYaGhqIiYkB6HZR6Ofnx5w5c9DpdEyZMgW5XE5kZCTV1dX4+/vT1tZGRkYGZrOZsrIyL30qvV7PVVddhdvtpq6ujtDQUHx8fKipqWHgwIFUVlZSXV0tGmitra0XlDCmZJycRwRB4N///jfV1dVkZ2fzzDPPeFXSnAq1Ws29996LVqslPT2dxsZGRo8ezbhx43C73QQEBJCens4333yD0+lk1qxZXp+32Wzs2LGD4cOHc+TIEZRKJZdcckmP5/Px8WHcuHFij53k5GRSU1MJDw8nICCA1NRUNm7cyGOPPYZcLkcQBC834z//+U+xRPmnn37i2muvJSoqiuXLlyMIAjt37iQzM5P29naKior6+GtKSEhInBqLxcK+ffuIjo7GarUyY8YMwsLCTvqZExeEERERxMTEkJycjFqtJiAgQAyhhIaGYjabiYiIEI2ffv36eS0K6+rqCAsLo6amBn9/f4xGI0FBQV7d5U/E47Xx5MKkpKRQUlJCY2MjQUFBjB07lvnz5xMYGEh9fT0+Pj6iPERKSgrjx4/HZrMRHx8vdjlub28nMzOTmpoaTCaTaFSVl5f/0p/3jCEZJ+eR0tJSPvroIwIDA3nrrbd6bKbXEzKZjPj4eJRKJe+88w5vv/02fn5+YtfMN954A4B//vOfXHnllQwcONDr8263W6yBr6ysZMiQIcyePbvX5w8ODubaa68V+0lAp5rhfffdR3Z2Nh0dHV5CbKmpqbzyyiuoVCoaGhr45ptvePHFF0WD7PDhwxQUFBAdHU1FRUWffgsJCQmJ3rB//37CwsKIjIxk1KhR9OvXr0+f9/HxYcyYMURFRTF16lSxD47H0EhNTUWtViMIAoIgdBGbtNlsmEwm2tvbiYqKYufOnd1WCPVESkqKGC5SKpUoFAqSkpLEeX/MmDGUlpaK+48ePZqEhAQOHjxIv3790Gg0lJWVUVZWJoaHamtrSU9Pv6DE2STj5Dzy/vvvU1NTw/z588nIyPhFxwoKCiI7OxuFQkFWVhYPP/ww8+bNIz8/n5qaGq699lrR4+FBo9Hw4YcfIpPJCA0N5d577+1W0O1kLFq0iA8//NCrBM5gMPDWW2+h1+u7qNfOmDGDq666CoAVK1ZgMpmYO3cu0JmZvmjRIoxGo9gtWUJCQuJM4fGapKWlUVpa+otVqtPT05k2bRpBQUGkpaURGBjI9OnTKS8vx+l0Eh0d3UWUMioqSmz419TURFRUFHFxcb0+p1arZf78+UycONErTyUlJYW0tDR27twpVmlCZ/7J5MmTRfl6X19fXC4XCoUCtVqNRqPBbDZTXFz8i36LM03X5AKJc0JZWZnoNbn77ru7GA6/hMmTJzNp0iRkMhk1NTVMnjy523CNXC4nMTGR4OBgdu/efUrXZnfodDqxVPhE+vfvz4ABA6iurvZ6X6vV8ve//52NGzfS2NjI888/z5tvvsm3335Le3s7paWljB49mkmTJvX5WiQkJCROxoEDBwgNDaWsrIz09PRu1VxPl8mTJ2Oz2dDpdISEhBAQENBtTzS9Xs/o0aMpLi6mtbW120qeUxEeHt6t5EO/fv3Izc3F5XLhcDjEcFJERARjxoxh8+bNDB48mH379uHj44PFYiE4OFjswNwXz/nZRvKcnCdW
r15NbW0tV111VZdwy5ngxDDLa6+9dtJcFoPBcFqGyclQKpWEhYV5hXU8JCQkMGbMGACKi4uprq72KnFWKpVn1FiTkJCQ8BQLREdHU1JS0ieV196gUqnQ6/W4XC46OjoICwsTk127Izk5mWHDhvWYiHs6eAyO9vZ2rx5q0OlZUSgUCIJAYGAgSqUSlUqFy+UShTIvpHn3wrmS3xAtLS28/fbbGAwGbrnlli4Dwm63U1pa2qXr5elw4403igPvXCKXyxk/fjxFRUVdbhKNRsOf//xnfHx8cLlc/Otf/+KWW24RXZT5+fmiqJGEhITEmaCwsBCZTIbJZCIlJaWL16S5udlLUO10UavVzJo164wbP70hKCiIkJAQzGYzjY2NXtuioqJITU2loKCA9PR0zGYzOp2O9vZ2wsPDkclkXrkq5xvJODkPrF69mtzcXGbNmtVt99+PPvqIzMxMnn322fNwdWeOq6++GqvVKqq/nsgll1zCuHHjAMjJyeGhhx4SG1Y1NTWxb9++s3593XVzlpCQ+HWya9cu+vXrR15eXpd51+l0smrVKsrKyli9enUXraa+otPpzktyqUKhYPDgwRgMhm7LgseOHYvNZhP76jQ1NaFQKCgpKcHf359jx46d9Wv0NC88FZJxch5YtWoVAHPmzOniNTEajbzxxhu0tbWdUXff+SA5OZkXXniBL774oouomlKp5JprrhFjrQcOHKCwsJAJEyYgk8nOeklbRUWFKAwnISHx66a9vZ3GxkbcbjfBwcFdklSPHj1KUVERdXV1mM3mC6pqpa94uhQrFAqv5oDQmasSHR1NTU0N/fr1IywsDB8fH4qLi0lOTsZsNp81GXuXy8X+/fvZtGlTr/aXEmLPMVarlcLCQiIiIrotH/vyyy/Jz89HqVR261W52JgxYwYjR47s9ma/+uqr+cc//iG6Ep1OJzabjeDgYFEq/0xiMpkoLi7m6NGjPPXUUxw9evSMn0NCQuLCw2g0otFoOH78OBkZGV7zkdvtZufOnajVajo6OkhLS0OtVp/Hq/1lyGQyRo4cSWlpaZeqR7lczogRIygrK6OiooLk5GQOHDiATCajurqa9vZ2rFZrj5orvcHpdOJ2u7FYLAiCQG1tLcePH6ehoYHS0lKvLvYnQzJOzjE1NTUUFhYycuTILkmoLpeLL7/8ErfbzZgxY8Sk0YsZuVzebc8I6OyYecUVV4h6LAC7d+/utXS9Z+CfmNPS3NxMcXExFouFxsZGOjo6yMvLw+12c/jwYcrLy7FaraJIkYSExK+f8vJyUaTs59WFDQ0N1NfX43a7kclkjBo16jxd5ZlDq9XSv3//brclJiZiMBgwmUwUFhYSEhIiGm+nwuVyYbFYyM/P98qJ9DQUtNvtOBwOXC4X7e3tKBQKAgMD8ff3p7y8nPj4eAoLC3v1HSTj5BxTUlJCS0sLY8eO7RLSKSkpET0GV111VZ81Ry42ZDIZt956K++//75oYPTGMGlpaeHjjz8mPz+/S9tvj/elu+OEhYUxaNAgDh48SHBwMH5+fpL3RELiN0BVVRUqlQq5XN6l+69HjdrPzw+5XN4nzZGLEb1ez8CBA9m1axdms5nJkyezY8cOjEbjSRdtx44dY926dbS1tXUpcoCuc7evr6+ofHv8+HFcLhfHjx//bXhOLrQWz71h+/btXq2tPbjdbl588UWqqqoIDAzsIjV/KlwuFzKZ7IIoBXO5XOTl5XHgwAHGjx9PbGxsl+/rITExkfj4+C5GQn5+PjabrVtrPicnh/vuu69P3g9fX18WL17Mpk2bWLRoEePHj8fhcJy01E9CQqIrVVVVaLXasxr6cLvdZ2wuczqd1NbWkpWVRU1NjVdIp7W1le3bt+Pr60tbWxvZ2dm9Pq/b7aa2thatVktgYGC38/q5QhAE6urqyMvLo7i4mIEDB5KUlERYWFi33yc+Pp6dO3cik8lobW0lPj6e/fv3I5PJaGpq6jass2/fPgRBwG63i+q3Hn7+7NFoNOh0OhoaGlAoFGRkZGC1WkWvzRdffHHK73RRGyfXXnstWVlZjB8/Hr1ej1KpJC0tTUyy9Eizny4ul4tVq1Yxa9asM3KjuN1ucnNzkclkXaTqc3Nz+d///gd0yrwnJyf36pjV1dWsW7eOjz/+mBtvvJHk5GS2b9/OnXfeKbrXQkJCzlqClyAItLe3iwN1165dvPLKK+Tl5VFbW0t4eDhDhw7l5ptv5vLLL+/y9/D19SUhIaGLcdLY2IjD4ejWONm1a1efwzLz588nJCSEtWvX8vjjjxMQEHBBdeCUkLhYKC8vp7q6WlQh9YRuPXNMcHAwISEhp53Q39LSwsaNG5k0aVKXxNXToa2tDUEQ0Gg0aLVar7lw9+7d2O12kpKSaGlp6dW8a7PZOHz4MIcOHUKtVuPn54dCoaCuro4JEyZgs9lwOBykpqb+otyNnnA4HJhMJiorK8V5sKSkhOPHj9PR0UFGRoYo4bB582amTp3a5XkTFBSEQqHA7XbT0NAgphi4XC7MZnOXc3oMPKPRiE6nQ6VSYbVavQwUQRBQq9VERERQU1NDY2MjOp0OPz8/goOD8ff3Jz09vdflyhe1cfLKK69QWFhIR0cH+fn5FBYW4uPjw/HjxzGbzQQHB5OYmMh1112HTqfDx8eH0NBQ5HJ5ryzcXbt28eWXX/bZi9ETLpeLkpISIiMjvUTHoFPK3ZNZfdNNN50y/tfS0sKbb77Jhx9+SFNTEw8//DDDhg1j3rx5FBcXs379esrLy+no6ODqq69m8ODBQKdQ0GWXXdbnPj4eHA4HFRUVWK1WfvjhB/bu3esll9zc3ExHRwcPPvggRqORgoICbr75Zu688062bNnCX/7yF0JDQ8XjKRQKrrjiCtasWeM10BsaGqisrOw2btra2tqna/b39+fWW2/lk08+weFwXPRVUBIS55vKykoUCgUBAQFotVrKysrw9/cXu5QXFRUhl8uRy+X4+PgQEhKCTqcT5QI8236OIAjk5eUREBBwxjzj7e3tyGQyjh49yrBhw0TjxOFwkJubS1hYmLiQ8nR474mysjK+++47HA4HgiBgMBgwGAwcOHCAyMhIvvzyS/Gh7efnJ3YJViqVTJgwgaSkpD6rwULnnLd//37sdjuFhYVi4qoHmUzGgAEDqKyspKKiAovFgkKhoLGxkWXLljFlyhSvHkKhoaFi88Hy8nLRKJPL5VRXVzNgwACv85/oMfGoyvr6+tLc3IwgCPj4+KDVajEYDFRXV+NwOJDJZCQkJFBWVkZhYSHz58/v03e+qI2To0eP8tFHHxEcHExsbCzjxo0jJSVFTHhyu9388MMPrF+/nv3792M2m+no6CAoKIikpCScTieJiYmMGDGChIQEfHx8xJV9e3s7//jHP1i0aNEZcy/W1tZSWlrKJZdc4tUTwel0smXLFqDT23Oqfg8tLS388Y9/ZPfu3cyePZubbrqJgIAAbrnlFgoLCwkMDGTdunWiVf3666+Ln5XJZAwfPpwFCxZw66239tmyf/PNN3nqqadwOp3dWtgevvjiC3x8fGh
sbCQ5OZl///vf3HHHHRiNRj744AMvD0pmZiYKhcJrMmpra2PdunXdGieeCa63zJ49m4yMDG6//Xaam5vZvn07EyZM6NMxJCQkOqmurqalpUX0TJeXl+Pr64vT6cTlcuHv709bW5uYDOnJUbDb7djtdpRKpbjKDggIIDg4WJyHKioqUKvVOByOHhPp+0pVVRV+fn7U1dV5ybM3NDTQ3t5OdHQ0x44dY/DgwT0uWgVB4Pjx46xbtw6r1YpCoSA5ORk/Pz927dqFy+WiurqahIQEcZHZ3NxMe3u7eIzPP/+csLAwpkyZQlJSUq+fKyaTiS+++IK6ujpcLheCICCTyUQjy9Mrp6ioCLfbjVKpxNfXV/RouVwu1qxZIwq0QacREhoaSl1dHRaLBbVaTVBQEI2NjRQWFjJ58mQvD5NMJkOj0YhJsM3Nzfj7+xMaGoparRYbHVZUVGC325HJZGi1WqKioigtLUWpVJKTk4O/vz8dHR29+t4XtXGyYMEClEoll19+OSUlJSxfvpz6+nqCgoKQy+UMGDAAvV7P2LFjue6664iPj6ehoQGtVktLSwutra3s3buXFStWUFdXR2BgoJgsVVlZSXR0tCgUdiaw2+1YLBYx8cqD2+0WQwwDBgw4qZz92rVreeaZZ6isrOS7774jNjaWb7/9lieeeEJ0l51M5EYQBPbu3cv+/fv55JNPePPNN3ulZGiz2XjjjTd4/vnnexUOObGr8NVXX83DDz/MSy+9xL333suiRYt4/PHHCQsLQyaTYTAY0Gq1XtnfgiD0mKyalZWFTCbrVfKsXq/noYceoqSkhNLSUmw2G7Nnz+byyy/vdWKWhITE/6ehoYH09HTa2tpwu93YbDY6OjoYPnw4jY2NGI1GIiMjMZlM1NfX43K5qKysRKfT0a9fP5xOJxqNBh8fH3F1X1lZKd7zycnJPVaanA42m00MYXh6zUDnfOx2u6mqqsLhcHTxFpy43/r16ykuLhZTByIiIjhy5AiHDx9Gr9cTExOD0WiktbWVAQMG4Ha7KS4upqGhAY1GQ0REBA6HA4fDwYoVK8jOzu5VJ+La2lq+/fZbampqxDwPnU4nKrsKgkBaWhr19fXY7XZ0Oh0KhQKXy0VVVZVocISFhfHll18ycuRIUlJS0Gq1+Pr6isdbt24dCoUClUpFe3s7HR0dXgtXpVJJaGio6ClxuVxiB+aamhrMZjMGg0E07mQyGZmZmbS0tGCz2QgPD+fo0aMcP3681zIRF7VxIggCN9xwA++8844oS1xcXCyWMe3Zswe73c6aNWv47rvvkMvlHDhwAJ1Ox6effsq0adOYP38+drud2tpaDh06RG5uLgCTJk1i7ty5XoP5TCCTybqs/D0eFegMQXQXdrBYLOzatYtbb72VyspKJk2axKuvvsqOHTsoKiry6kLZG1wuF7t37+a+++7jhRdeYMiQIT16URwOB4sWLeLdd989rQd6SUkJd999N5GRkXR0dPDGG2/www8/MHXqVCIiIhg0aBA6nQ6TyYRcLkelUmGz2cjNze02Ma4v+TPz5s1j0KBBvPHGG6Lqo8Vi6VVCloSERFc8oZdbb70VpVJJZWUl5eXlYlm/QqHAYrEQGhpKcHCw2A3Xbrcjl8u59NJLsdlsNDQ0UFtbS2trq+jl9fX1JSQkpNumdr8Eq9WKSqXyCqlUVlaKi6P29nYxBOPBZDLR0NBATk4OFRUVBAUFUV1djclkoqamhrS0NFpbW5HL5Rw/fpy4uDiMRiPbtm3rcu7jx48TExPDoEGDKCgoIC8vj8bGRtLT0wkPD0epVHaZf+vq6vj8889FL1VKSgp5eXk4nU7a29txu90IgiCqaXu8G57+OWlpaeJ+Op2OyMhINmzYwIYNG0hNTcVoNCKXy4mJiREVY+Pi4igtLaW9vf2UXnVPHx+PB72jo0N8dmk0GrKysli+fDkulwutVit6x3v7rLqojZP58+cza9Ys0VozGAwMHTpU3O7pbOuxON1uNxs3buR3v/sdX331lagjolariYuLIy4u7px0Zfx52MZms3UrpW61WiktLeW9995j69atHDlyRLz58/LyRO9ERkYGw4YN6/IQz8rK8srvaGtrY8eOHTidTvbt20dHRwf79+8X45FpaWmEhYUxYsQIhg8fjk6nw2az8dJLL/HZZ591a5hERESg1+uRy+UMHjyYgIAARo0addKcFqfTydq1a/nss8+YNm0aW7duxW63A505MYGBgdTW1vb4eY9L81Sek5CQEB544AFxklEqlRdlhZeExIWEJzzgCREEBASQkZEhbvc8NI1Go/gwqqmpwel0cvDgQSZOnIhWqyU2NpbY2FgcDodonHjKfc80dXV1xMbGei38bDab+ED3IAgClZWVFBQUcOTIEVpbW1Gr1YSFhWE0GkXvgdPp5PDhw2J+iVqtFues8PBwr2M6HA6MRqOYR6dUKhkyZAg2m421a9diMplQKpX4+fkRFxeHWq3G6XRy6NAhzGYzAQEBpKWlcfDgQVwul6jH4pn/NBoNcrmcwMBAr3C50WgEICkpCavVSlVVFYIgEBAQgMlkIj4+Xgw7xcfHU1ZWhtls7vH3//mi0N/fXwxhCYKA0+kUPeAjR47EZDJRXV2NUqmkvLyciIgIMXH6V1+tU19f3yuZc8/DTC6XM2XKFCZPnsyePXtwOp2iJV1YWMiyZcswm81otVouvfRSEhISCA0N7ZU4TV/4+R/5+PHj4sPZ7XbT0dHB5s2befXVV9m5cydms1kciOPGjeMvf/kLAwcOFA0AhUJx0q7DJ3LLLbcAnd4Dj/dk+fLlFBQUsGXLFurr63nnnXfQ6XTI5XJR6e9EfHx8GDVqlOh5Cg8PRy6Xo9Vqez2xzJ07l6KiIvz8/PjTn/6ETqfjpZdeYseOHVRVVZ30s6mpqQQHB9PQ0NBlW0pKCqmpqaxatYr58+czcOBAMRzVW3E3CQmJnvE0lHO5XN3maHjmgBMXRj4+Prz33ns4nU7q6+uJiYnB7XZz8OBBmpqaxIet2+0mMDCQlJSU007a78v30Gq14txZVVXFTz/9RFFRET4+PgwaNAi32y0mziYkJBAfHy9el0ql6uLhkcvlovfCg9vtFoUfa2trsdvtHD16lPLycux2O+Hh4ZhMJoKCgujo6MDhcOB0OlGpVAQEBBAQEMChQ4fEhWlAQADDhw8XjcOwsDC0Wm2v5t+SkhJ+/PFHKisraW5uRq/XU1paSlxcHDqdDq1W26NnIy4ujry8PGQyGSqVCn9/f+rq6sS/m81mIyAgAJvNRmZmJitWrCA8PJyIiAicTicFBQVUVVVx5MiRXv19LmrjpKKigvXr13Pdddf1ulzWarVy9OjRLh0pV6xYQUREBCqVik2bNnHddddxySWXEB8fz9NPP31GrPkTk5hOZPv27aJXYs+ePUyZMoUDBw6IbtCxY8eyY8cOhg8fzhdffEFkZOQvvhbPDTl58mQmT56M2+1my5YtzJ07l5iYGJKSktiyZQs+Pj5iyKW6upqQkBD+85//MGPGjF8U8lKpVIwePZoXX3yRpU
uXMnnyZBYuXMiDDz7InDlzTuo58fX17fHcDoeDffv2oVAouOSSSzAajbzzzjtehklsbCyTJ08mODiYV1999bS/g4TEb5GOjg4UCgUHDx4kNTVVTL48GU1NTWLSrOde9FT+OZ1O0ftaX19PY2MjTqeTjIwM/Pz8fvH1dudltdvtojfHZDIhCALff/89Pj4+DBkyhOrqanbt2kVAQADt7e1cfvnlXt6hvuCpWILOxRNAeno6JpMJk8nE3r17qampEY2F+Ph4cZvD4RC9E4MGDWLMmDGEh4eftp5KTEwMZrOZoKAgampqMBqNBAUFodVqsVgsJ/Usn+h1ksvlXkaMTCbDz8+PxsZGQkJCqKiooKamhvj4eOrr62lqagI6vTy9lffo0xP3ueeeY8SIEfj5+REWFsZVV11FQUGB1z5Wq5W77rpLzMC++uqrqaur89qnvLycyy67DB8fH8LCwli0aNFpuds9SaGjR4/m9ttv57///S/l5eU9Wn5ut5tPP/2U4uLiLsbMtGnTuOWWW7jllltYvHgxr776Kvfffz/Lly/n22+/7fO1dUdERATx8fFiXgt0Zr5/9dVX4muz2SzW3kOny/Spp55Co9Fw3XXXnRHDpDvkcjnjx49n+vTpXH/99fzvf/9j9+7d5OTksH//flavXo2fnx/jx4/n8ssvPyO5OPHx8chkMmw2G6tWreIPf/iDODlA5w3cV32WsrIy6urqiI+PZ9q0aXz//fdiPFWhUKBWqzGbzbS0tHD99dd7ffZcjl0Jid7S3bzrUTX1cC7HrifB87vvvuPtt99m8eLFbN++ncbGxm69k558Oc/97nnIqdVqWlpaGDNmDJdeeimZmZn4+fnhdrs5cuQI69at63MuXXdER0djs9kwmUxi+LygoIC2tjYcDofoCerfvz9DhgyhtLRUTPp0Op0EBASc0QRdD3q9XuyxFhcXR3p6Otdffz2hoaEMGjSIefPmMXToUMLDw9FqtUyePJmoqKhfJPSmVCrx8fGhublZrDayWq1YrVaUSiUOhwNfX99TGoUeJW5P92WlUkm/fv1oaWkRVbhlMhnt7e00NDTg7+/PjBkzuPvuu3stzdEnz8lPP/3EXXfdxYgRI3A6nTz++ONMmzaNvLw8MZno/vvv5/vvv2f58uX4+/tz9913M3fuXDFJyOVycdlllxEREcH27dupqalh4cKFqFQqnn322b5cjtioqaSkhJKSEv7zn/8QEhJCdnY2M2bMIDg4mJaWFoKDgzEYDGzdupWXX34Zi8VC//79vf7Iw4cPF/9fLpczf/58BEHg5ZdfPmN9WNRqNT4+PnzxxRcsWrSIlpYWbrzxxi7Zy55y3wceeAA/Pz/RkEpMTDzp8T1xP+hcqbS1tdHe3i5aqxEREV0aQZ2IXC7HYDCQlJSEQqEgKSlJ3BYZGUlgYGCPN4bVavUSBfJ8j+jo6B7bh6elpXmtatrb2zl69ChDhw6lsLCQRx99tNvP6fV6+vXrR3V1dY/fZcKECQiCwPPPP49KpSI+Pp4//OEPzJw5E7fbzeOPP85VV13l9ZlzOXYlJHpLd/PunDlzvPY5l2NXo9EgCAJRUVE0NzfT2NjI2rVr2bBhA8HBweh0OiwWC76+vmi1WmpqasTqErfbLQqryWQyrr32WjFsHhoayhVXXIHVamXlypWoVKozEorVaDSivlVpaSlut5uVK1ciCAJ6vZ60tDT2799PcXExhYWFREZG4u/vL2pmJSQk9KhN4nK5sNls1NfXA52VTK2trWICrkKhIDo6msTExB71lVQqFTExMWKI50SZCYvFIs5zP58LHQ4HlZWVHD9+3GveVSgUxMTEEBkZ2SWp1ZObUl9fj6+vL4MGDWLr1q34+vri4+OD3W7nkksu6TYZNjQ0FKVSidvtxu12U11d7ZVEfOzYMXx8fDCbzdhsNtLS0qisrGTy5MkMHToUvV6PIAgnlaA4kT4ZJ6tXr/Z6/dFHHxEWFkZOTg4TJkygtbWVDz/8kE8//ZTJkycDsHjxYgYMGMDOnTsZPXo0P/74I3l5eaxbt47w8HCGDBnC3//+dx555BGeeuqpPim6rl27lqVLl7J161aKi4tpa2ujoaGBb775hm+++UbMmVAoFMjlcjF0EhYWxsyZM095fJlMxuWXX96HX6h3lJWVsXr1apYuXcpPP/3U5Zy/+93vePPNN8Wqnvb29h7FgcxmM3l5eezcuZO8vDz27NmD2+2mrq6O5uZmMXlKJpMRFxfHDTfcwJ///OdujRS3201zc7OYSHUiVqu12xLiyspKPvroI7777juxwZ4HjUZDbGwsY8eO5e6772bAgAFeN5hGo+lywwmCQHNzs7ia6A5P0mxPyGQy5s6dy4EDBygpKeHKK6/kk08+8fL2PPjgg0yZMkV8fa7HroREb+lp3vVwrsfuwIEDOXr0qBgO0ev1Ygnq+PHjEQQBf39/bDYbTqdTDGvk5+eTkpLi9aD/eT6fTCZDp9Nx9dVXIwjCGZODl8lkhIaGsnHjRpqamsQKovj4eA4fPozL5cLHx4fLLrtMXDQdPXqUwMBA0UDySB148jQOHz5MVVUVbW1t4gP3xGR9z9wmCAKhoaFMnz69WwVam81GRUUFOp2uiwS+xWIRk0w9ybCCIHDw4EG2bt1KR0eHKMlgNBoJDg6msbGRvXv3olQqGT58OGPGjPGa+zzH97Q8EQRB9IKo1eoevUS+vr5e38lgMJCdnc3333+PIAg4HA5iY2OpqakhMjKS0tJSL2+/IAhs27atyzOvJ35RzolHqdPzEM3JycHhcHDppZeK+/Tv35+4uDh27NjB6NGj2bFjB4MGDfJKJJo+fTp33nknR44c8aq28WCz2cQyUEB8SKampvLaa69htVppbGxk48aNbN26laKiImpqasjKyqKpqYnm5mZR+0Ov1/PII48wceLEX/LVfxEWi4UFCxZ0qdDxGCbvvvuul/Gg0WjQ6/VinsgLL7zAsWPHGD58OMuWLWPXrl3dVvv8nKNHj/Lkk0+ybt06XnjhBXHweqiurmbfvn08+OCDXT6rUqnQarXU1tZSWFhIZWUlx44d49VXX6WyspLAwEDsdjuRkZGUlZURGhrK22+/zV133cU777zDV199xRNPPMGtt9560gRjT87IggULTvpdTlbmFhERQVZWFm+88QZRUVG8+uqrXjdnY2Njl+94rseuhMTp8nOF5HM9dsePH8/EiRNpaGigpqaGgoICcSXuqWBRKpUoFAoUCgVWqxWTycTgwYOZOnVqr0K1Z7pip729HZvNRlNTk6hJYrVaKSwsxOFw4OfnxzXXXOO1CNRoNCQlJVFVVYXJZGLTpk0EBARgNBopLy8nKSmJUaNGsXPnToYOHYparRZl2g8ePCh6qg8fPixWKBYWFqLVapk4caL4HYuLi4mJicHhcHQxxjzzlkajYd26dQQGBtLY2Ijb7SYqKoqjR4/idDppbm7Gx8eHuro60XMyceJEtm7dyooVK5g+ffpJF3RNTU1kZmbS3t7eY3GF5+/pCbV5FsEymUxclCYmJortRSZNm
uRlmBw+fJhNmzb12uA8bePE7XZz3333MXbsWDFRqLa2VlT9O5Hw8HAxwdEjE/zz7Z5t3fHcc8/xt7/9rcdr0Wq1xMTEsGDBAhYsWCBmO3syjz3/ALGq5HzTnTGRmZnJm2++2a1Xw9/fX/ydx4wZg8vl4ttvv0WlUjFu3DgEQaC4uJiKiopu47Se7Oq0tDSCgoIoKiryuhHdbjcvvfQSISEhDBo0iNbWVlEDADrLcvv160d+fr4oZfz2229TUFBAeHg4er1elE6GzrLuSZMmcc0114hiat9++y1ZWVmi6FtkZCQGg6Fb0bhT6RxkZ2fz8ccfd7ttyJAhhISEcOTIEaZMmUJ0dLS4rby8XPSqnMj5GrsSEn3BM++OHj2anTt3Audn7Or1evR6PYmJiYwaNQqbzYbRaMRsNhMREYHFYhFzGaAzpB0bG3ta0u1nAk8Pmbi4OCIiIqisrBS3yWQyZs2a1a13uq2tDYPBIErvexTGS0pKyMvL48iRI5jNZlER98Tz5efn43K5sFqtaDQaVCqVmEzs8cZ4Gg/a7XauuuoqcnNzkcvlpKWliSGhLVu2iA38PNVDw4YNY/fu3ajValFMLTAwkLKyMoYPH87x48f59NNPEQSBlJQU9u3bJ3qKQ0NDRS+5pzBEEAT69+9PeXl5j8aDwWDA39+f+vp6FAoFHR0d5OTkIAgCcrkcmUxGQEAAZrMZmUxGWlqa+Nn9+/ezZcsW8ffoDac9Uu666y5yc3PZunXr6R6i1zz22GM88MAD4uu2traT9kBQqVSixalUKs/bDfFzrFarKN3rESPKzs7mb3/7GzKZjD//+c8nlWb3WNoTJkwQcypOxGg0kpOT0207a0+tfHBwMEqlssvqJTc3l6VLl/KXv/wFjUbD1q1bKS0t5bbbbhMHnlwup6mpieLiYmbNmsWQIUP45ptv2L9/PwAJCQmUlpYSHx/P73//e4KCgnjjjTe8znPieUNDQ7sYJx6FyRMHdnfExMR0q1sil8v53e9+R0tLC3l5efzhD38Qz+l0OnniiSfIyck56bHPJH0duxISJ8Mz765atYr09PSzeq7ejl2FQoGPj49YkQKcNLftXOOZD2NjYzl27JhomOh0OpxOJ6GhoV59Z07E8+BVKBRkZ2d7KaN6qK2tJS8vTyxiOBFfX19iY2PF3LugoKAujQfdbjd+fn4EBQWxfv16nE4nERER4r6CIIhz69y5czly5Ag7duzAYrHg4+NDVVWV2NOoX79+jB07luHDh3sZASdWpwYFBSEIgpf4XWNjY5e/YXcEBATQ0NCAIAhERkbidDpF9drY2FhsNhuBgYFERESISbWNjY2sX79elMTobQXWaT217777blauXMnmzZu9Ws5HRERgt9tpaWnxsuLr6urEJJ+IiAh2797tdTxPVvmJiUAnotFozrjWyPnCMzCjoqL46KOP2L59O9B5M0+dOrXHz3RXHfNzAyM4OJhp06b1+ZocDgdPPfUUYWFh/P73v0cul3e5FpvNJia0rVy5kpkzZxIREcEf//hHcR+n04nD4eiixNgTer2ecePGUVZWBnR6wMLCwrBYLKfsnzN27FjmzZvHsmXLvN5XKpUMHjyYoqIi7Ha7lzT/d99916O3RRq7Ehc6J867njYbII3dU+GZJ+12Ow6Hg0suuURsZWGz2ejXr1+Pei0/13JRKBRe+i3Q6YHKzMzs83XV1tZSUFCAw+Fg+vTpBAUFMXfuXFHeHzoNK5vNRk1NDdXV1VgsFgYPHszAgQNFY6i1tZXW1lax141CoUCj0XSRy/AQExMjykN4KpM8ia2nqsIcNmwYVVVVmM1mampqiI2NFQ240NBQampqCA0N9epv9+OPP3olwXpKo09FnwJ7giBw991389VXX7Fhw4Yu1SPDhw9HpVKxfv168b2CggLKy8tFNdYxY8Zw+PBhMbsZOhNbDQbDWV8JnG9UKpWYeDZv3jzCwsL4+uuvxXbePQ0MlUrF0KFDvX6zM0ljYyO7du0SO4d2R2lpqVg2vnbt2m4HmFKpRKfT9dpTJZfLvdRyo6KicDgcvXL7BQYG8vvf/76LgeZpQrV9+3ZSUlJEoaLGxkb++Mc/9lh5JY1diQsVad79ZahUKrE81tfXV+wH09raSlNTU48GWHh4uNgb5mxQXl4uJhN7cug8hoWHkpISsZmixWLh+PHj4n6eHjsRERFiv5/e5HMYDAb0er3o8YLOufibb745ZXVU//79iYmJEfNMEhISxDlYoVBgNBqx2+2i06KgoICSkhJRpV0mk53SO+OhT8bJXXfdxdKlS/n000/x8/OjtrZW7KcAna6j2267jQceeICNGzeSk5PDLbfcwpgxYxg9ejTQqSeSnp7OggULOHjwIGvWrOH//u//uOuuu34VVvrJUCgUKJVK5HI548aNY9++faxcuRLoTCQ6mSs0KyuLNWvWnJXrqq2tpampiQULFvRoIEVHRzNs2DCgs9po1apVZ+TcJ670pk+fLibPeXoNnYzo6OguLsLo6Gji4uLYtm2blxT2woULu1WU9SCNXYkLle7m3RM1TKSxe3I8De2ioqIIDQ3l4MGDQOec61nxd4dHgsFms52VJqH19fW0tbXh6+tLVFRUt/skJiaKhkBgYCAHDx78xbpKHr0nT+6lj48PISEhaDSaLto43REWFuZVNeTxMPn7+1NeXk5ZWZloJOXk5OB0Or2qmLpLO+iOPhkn77zzDq2trVxyySVERkaK/z7//HNxn9dee43Zs2dz9dVXM2HCBCIiIvjyyy+9fpiVK1eiUCgYM2YMN954IwsXLuTpp5/uy6Vc1MjlcsLDw3n88cdFD8SIESNOKkE/dOhQNm7ceMarPQRB4LvvviM8PJwZM2b0uJ/BYOCJJ55Aq9XicDh45plnui05Pl1CQkL405/+xLZt27DZbF1EprojLCysS7MuvV7fbS5Kb4wpaexKXIh0N+/+PEdCGrunprW1lfb2dnJychg5ciRWqxWdTudVln0iMpkMf39/9Hp9r9qk9AWz2UxlZSVms5lBgwb16PFITk5mwIABHDp0iEGDBlFSUtJjt/a+otVqKS8vF/NnBg8eLOaTnAxPzopMJqOjo0P8f8/C1vN5u92O0WgUt3uMkxO7z5+MPod1uvt38803e33ht956C6PRSEdHB19++WWXmGZ8fDw//PADZrOZhoYGXn755QsmafVc0d7e7tVj4MSKku6Ii4sjPDyce++9t1dehd5SUFDA+++/z6uvvtqjroiH8ePHixnfR48e5d133/3FCo6RkZFkZmby8ccfk56eLrpQ//vf/57SwpbL5V3GzfDhwzGZTKIKr8Ph4O233+5xAjoRaexKXIh0N+f+vJxYGru9o76+HofDgVKppKWlxcuD0B0pKSmUlJTw7bffsn379l6v+k/F3r17KSsrIyEhgaysrJPum52djdPppKOjg/j4eLZu3drrB3xPBAYG4na7SUtLY9SoUTidTvLy8qivrxcrLnvC4402GAxMmzZN7HrscDjEXnRtbW0sXbqU5uZmscrI02G+t8nSZ779
o8Qp8Qj8nCzM8HMUCgUvvfQSTU1NjBs3jmeffbbXiUU9YTKZePTRRxkyZEivEmnVajX3338/Wq0Wt9vN008/zV//+lexb8LpMHXqVLZs2cKMGTOQy+Wi7kpxcfEpb0B/f38GDx7s9Z5cLufw4cNUVFQgCAKffPIJ999//1nL15GQkLg4cLvdlJaWolAo2Lx5c6/mreDgYPHhvXnzZpYsWSJqi5wulZWV7Nixg/DwcEaPHn1KA9GTU5KXl0d8fDxqtZolS5aQm5vb67LcnzN79mymTZvGFVdcQVhYGHq9XhSm88j590R0dLQounfi71BYWEhqaiput5uvvvpK9PL/3HPSW8V1yTg5h8hkMoKDg0lPT6e2trbPAzwkJISPP/6Yu+66i88++4ypU6fy3//+97RuFLfbzV/+8hcOHjzIK6+80uvyrvHjxzNp0iT8/Py4++67+eKLLxg3bhx/+9vfTtsAOPHcHg+HxWIR23n3RE9VTE1NTbjdbvbu3ctTTz3VbYmfhITEbwNPTk1hYaH4EPZ0Re4NycnJzJs3j8DAQCwWC3a7nQ0bNpxWWLu1tZVvv/0WHx8fsrKySEhIOOVnZDIZ2dnZuN1u9uzZQ1VVFR0dHXz11Vd8/fXXHDt2rM/XodPpxColpVKJWq0WvRunWhR6QlAnGhqeXJLS0lKUSiXt7e20trZ2GyKSjJMLEIVCwXXXXcdNN9102rLM/v7+PPbYY2zfvp3XXnuN1NTU01JTbGpq4vPPP+fyyy/vsca/O9RqNTNmzMBqtTJt2jTWrl3L/PnzaWxspKqqqs/X0RMBAQG9CsV4StZOxOOmbW5uPuOxYgkJiYuL0NBQBgwYQFxcnFhF0tck4MTERBYuXMjMmTNpbGxErVafNEewJ4qKirBarTidTrHBaW8IDw/HYDAQHh5OYmIi48ePx2Aw4Ovr+4s81ycyduxYgoKCxArHntBoNGi1WgRB8MqBVCgU1NfXExISctLnW29Dib+tgOMFwA033ADA66+/7vX+0aNHvfoxnAo/Pz/GjRt32texbds2TCbTaR1j1KhRCILA66+/zhdffHFWkupOVPU9Gd2VQf7aqw8kJCR6j1wuZ+zYsdjtdsrLy2lsbESj0aBQKLBYLJhMpl7lQeh0OtLS0k4pENkTTqeTsrIytFotPj4+J23B0d13iIqKQi6XU1NTQ3R0NHfeeSdut/uM9fQymUw4HI5TejY8Ym3t7e2iYSSTyVAoFNjtdlwuFyqVCrvd7tWLx7PfgAEDenU9kufkHCOXy5HL5QwbNszL4+FRjj0XFBYWct9995GWlsb06dP7/PmUlBSGDx/Ojz/+yIcffnhGr83T+dhkMnUray8hISHRV/R6PUFBQaIMfH19vVii+0tLc3vLnj17aGlpQS6XiyXefWHAgAE0NjYyatQo1q5diyAIaLXaM9KHyNfXl927d9Pe3v6Lcxmrqqqw2Wxdck08rwcNGtSr40jGyXkiJibGy1pvbGw8I+3BT4XJZOLuu+8mJCSEjz76qEcVwZMRHBzMww8/jEwm47XXXhNbep8J/Pz8xOzuxsbGUwogdZdzEhMT0+scGgkJid8OHo0Oj2Kp0+k8JzlpZWVl7N69m7a2Nvr379+nULqH1NRUtFotNpsNtVrN4cOHz9j1aTQaHA4HLpeLqqoqysvLT2q0eVqgnOjd9vy2nqqcn1fpeF73NgwlGSfnidDQUK8H6LkwTKBT7yM3N5f33nuv1xZsd8ycOZPRo0dTXl7Ohg0bztj1VVdX43A4EASBL7/8kpkzZ7Ju3boe9+9OHyY5OblP8VwJCYnfBh6Njra2Nq/S7LOJIAhs2rSJ1tZWMjMzvToS9wW5XE52djZ79uwhLS2NQ4cOnbFrb21tJSAgALVajcPhICcnhw0bNvR4/Li4OGQymahYC53qsZ4+bCd6Sn7+urc6LZJxcp7w8/Pj2muvFV8XFBScsoTrl+JwOPjPf/5D//79u5Tg9hWbzYZSqUQQhDMmCgSI1np4eDiPPPIIWq2WN954o8ebRKVSdSth//MOrRISEhJJSUkEBASgVCpJSUlBEIQzmsjfHR5FX6VSSVhYGC0tLadtVFgsFlpaWigpKaG5ufkX60x5cLlcjB49Gn9/f+Lj48nPz2f37t09PpMUCkUXz4lOp+uVcXJin52TISXEnieqq6vJzMxEp9OJ7cXPduzT5XJRXl5ORUUFl19+OQEBAUyYMIHs7GxSUlJ6naDlcrnYsmULu3btOuPXuHfvXgCuvvpqDAYDWq2WoqIiTCaTFKqRkJD4RRw7dgyj0YhGoyEtLY3CwsKzHtYxmUyo1Wr8/f353//+Jy6eEhISGDBgALGxsb2qYGlvb2fv3r2o1WrKy8vPSK6J5/o6OjpwuVy43W6Sk5PZtm0bWq0Wo9HYo7z/2UYyTs4TxcXFvPLKK6jVaiwWC2azmZKSkh57LJwpPOI5nj49n3/+OVqtlmHDhnH11VfTr18/IiMjiY2Npba2tttS3HXr1vHBBx+IaolnKltcEASqq6uRyWRkZWXhcrno6OigsrKS1tZWyTiRkJD4RVRWVhIeHk5jYyO5ubkolcqzLtAol8tpaWkhNjZWDJ90dHSwb98+cnJyCAwMxN/fn8DAQMLCwjAajd0KdDY0NNDe3k5wcDAWi+W05Sh+jt1ux+12097eLub8aTQagoKCeswP6W1V6S9BMk7OE0lJSVRXV4sy1J622GcTrVbLTTfdxKJFi3C5XMhkMrRaLRaLhe3bt7N9+3bg/5eKWSyWU1YRRUVFMXfu3DNyfe3t7ezfvx9/f3+GDBmC1Wo9ZbJtREQEMTExFBYWAnDo0KGz1kVUQkLi4iY4OJi8vDz8/f1RKBS4XK4z2iOsO2JiYoiKiqKgoIDg4GCam5sRBIGEhASxtNloNFJSUoLb7UYmk3XrFZHJZKSnp9PS0oLRaCQrK+uMtB+orq4mICCA2tpakpKSaGtrQ6fT9dgEFjrnXU+iK3Q+v/qieN4bpJyT80R0dDRjx471es9jHJxNbr/9dmbPni0O6u5in2azmcbGxlMaJhqNhpdffpmMjIwzcm2tra10dHSQlpZGVFQUTqeT5uZmYmJieswh0el0Xi24jUZjrxUIJSQkflukpqbicrnQarXU1NQQFBSE0Wg8bRn43qDRaJgxYwZqtRqj0YhWq8XX15eOjg4GDx5MSkoKiYmJBAYGAp0ClImJiV7//Pz8MBgMtLS0iEbEhAkTzsj1tbW1ib9HXFyc6JXxeGm6w9fX1yunxOVynfHfUDJOzhNyuZxp06YRFRUlShgXFBSc9bwTvV4vtl8fNWrUaR3Dk9j13HPPcc0115yxa1uxYgUNDQ2EhIR4rQhCQkK8DJCT0draesYUEyUkJH5dBAcHEx0dTUdHBx0dHVitVqxWa5dGimeauLg4rrnmGmJiYpDJZKKhsWfPHoqLiyktLaWlpQWFQkFbWxulpaVe/1pbW2lubqa6upro6GiuuuqqMxLWcblcHDhwgMzMTJxOpzjPemTsTyVOJ5fL0ev1yGSyX6yP8nOksM55JDI
yEqvVKg6yoqIi7Hb7We8UqtfrmT9/PjNnzqS4uJjdu3fjcDg4fvw4BQUFJ/1sRkYG2dnZDB48mNjY2DMae/Rkhk+YMKHXyV5KpZL+/ftz4MABoFOW/2xXPUlISFycyGQysVxWoVCg1WpFwcfw8PCzeu64uDhuuOEGSktLKSgowGKx4Ovri81mw2w2n3QuNRgMREREkJCQwJAhQ3q9WDsVnp44ra2tqFQqgoKCeqVbFRAQgFarxel0iuGdMx3WkYyT80hGRgZ2u12MedbX11NeXk7//v3Pyfn1ej2ZmZlkZmaek/P1Bh8fH6ZMmdLr/eVyudgv40R601BLQkLit0d4eDgdHR2i1olcLqeqquqczLtyuZykpCSSkpLE9zwFESdDr9ef1bYcx44dIzExsdfeGF9fX1QqFVarlfb2dtHQ02g0XobWibL1nte99bBIYZ3zSHR0NOPHjxdf9yYB9deK2+3m6NGjxMXFnXbvihM5VwaehITExUVqairNzc3I5XJRm+N8zrs6nY7g4OCT/jtbhoknnNXY2EhKSsovOpaPjw86nU5SiP01oNVq+fe//811112HQqHA6XSyf//+831Z54Xdu3ezceNG5s2b10Vvpbi4uNcZ9RqNBo1GQ1lZ2dm4TAkJiYucmJgYZsyYgc1mE5NTa2trz5lK94XE7t27CQgIQKPReHmblUolISEh1NbWdvs5t9uNIAioVCqxqkcQBLHZ38lE2HrbMkUyTs4zMTExfPDBB0yYMAFBEM56WdvJMJlM1NfXd1vtcjYTTZ1OJy+++CIOh4M5c+Z02d7e3t5jJnhRUZHXYA8ODiYsLOysaxdISEhcvGRkZHDttddSXV1NRkaGqNl0rrHZbBQVFbF3794u12AymcjNzeXAgQM4HI4zfu76+nqKiorw8/MT+/Z4OFFmojt++OEHmpubCQ4OZsqUKbjdbgICAnplnPQ2X0bKObkA8PHx4ZprrmHjxo3n/Nx2u52SkhI+//xzvvzyS+rr61m4cCHjx48nJiaG+vp6vv32W9auXYvdbueOO+7g2muvPaM5Hd988w2rV69m4cKFXvkvCoUCnU5He3t7j9olmzdvJi0tTbwB1Go1Go1G6mgsISFxUjwSBedaesAjmV9QUMCRI0dE3ZPNmzcTGRmJn58fHR0dlJeX09HRgUwmY8uWLYwaNYphw4adkYIJt9vNhg0bSExMpLKykqlTp4rbVCoVLpcLpVLZrXquzWYTw2BtbW3id5LJZDidTrHbMyAWNpz4+ue90HpCMk4uEEaOHIlOpztnK363283hw4dZtGiR2C3T49Z88cUXeeWVV1AoFF1aij/66KO8//77PP3008ybNw+1Wi0OyNMpbWtvb+f5558nODiYxx9/3OsYSqVSFAfqaeVQXl7upRxrtVqxWCyMHTuWr7/+us/XIyEh8dtAoVAQHR1NfX09giCIYZ6zidlsZu3ateTl5eFwOAgKCsLPzw+VSoXNZqOyslLsOuxyufD398fpdGKxWNi7dy+FhYXMmDGDkJAQmpubcbvdPWqRnAxPbx673c6wYcNEjRXozIHxLAa7690jCAJhYWFs3rwZf39/NBoNMpkMjUaDv78/TU1NKBQKBEEQxT5PfF1RUdGra5TCOhcIqampREdHs2bNGtrb28/quRwOB2+//TaTJ09m7dq1tLa2dom3ulwu7HZ7F90VmUxGWVkZt9xyC/fddx9ms5klS5awaNGi09Jo+eabbzh06BBPPvkkcXFxXtt0Oh0JCQk4nc4eM7yPHj3qdd7a2lqqqqrIzs4+Y7L6EhISv06SkpIwm83nJE+toaGB5cuXi3mFQUFBhIaGis0Iw8PDiY2NJTMzk0mTJhEWFkZUVBQqlQqlUklwcDDl5eV8+umn1NTUsGnTJr7++us+pwK43W62bduGn58fbrebYcOGeW0PDAzEbDajUqm6TRS2WCz4+PiIXpGwsDCUSiV1dXWivIQnJ8XTCPDE1yEhIb26TslzcoHg5+fHoEGD2LBhA0aj8az0kREEgT179vDmm2/yxRdfeIVKoqOjqa2txdfXl5SUFOLi4qipqRFdi+3t7WLYZMCAARw4cID33nsPf39/Ro8ezYcffsh1113HiBEjen09TqeTpUuXEhQUxGWXXdbjfm63u8fmXJ7mXZ74qCcpKyYmBn9//zNeey8hIfHrITIykvb2djFP7XQqBZ1OJ3a7vdtcCkEQ6OjoYPPmzRw5ckQM0wwZMoQjR45w9OhR1Gq1OBfL5XIEQfCSsY+JiaGlpYXDhw+j1+txOp0sX74cnU5HW1sb27dvZ/bs2b2+3vr6evEZM3jw4B5l6lUqVbclzj4+PjgcDjIyMmhtbRXzAV0uV68Mj96G0STPyQXEmDFjaGtr49ChQ6f1+S1btvDqq6928YKYTCaWLVvGXXfdxbRp0/j444+75HA0NTXhdrvx9/cnLS2N7OxsnnjiCbZt28a2bdu4+eabUSqV2Gw2UfDM7Xbz8ccfM2zYMN544w0SExP7dL1ms5n8/HwsFgv5+fk9el4cDkePOSQxMTEYDAZ8fX3FfZctW0ZMTMxJDR4JCQkJPz8/dDodOp2O48ePn9Yxtm/fztq1azGZTOJ7nvDFkiVLePvtt8nJycHpdBISEsKUKVPQ6XR0dHQQGhrKnDlzCAkJQavVkpqaSkxMDGlpaQwaNAhBEGhoaECr1aJSqUTRNqPRSEJCAjExMX2WTWhoaMDX1xeNRkNubm6PXmlfX1+v7+RBo9Fgt9txOBzY7Xb27NkDdGqlJCcnn7Lsubfhf8k4uYDIyspCpVJRWVl5Wp8/ePAgr7/+uvggb2ho4P3332fOnDnceOONvPPOO93KNMtkMqKiopg6dSpZWVlYrVaOHz/Ohg0buPnmm4GepfWrq6v5/vvvuemmm3rtrvs5ra2tXH755dxzzz1s374dQRAwmUwUFxcDnauPngyXfv36cfz4cS/XZm5uLjKZzEtDRkJCQuLnKJVKIiIiaGtrw2KxdJtjcSry8/M5dOgQx44dAzrnyg8//JD//ve/GI1GoqKiCA0NFb3hW7duZceOHUBniOSHH34Q+9jExMSQnJxM//79cblcGAwGlEolHR0dpKeni3O1QqHg+PHjzJ0797T0SWpra3G73VRWVvLBBx/w3XffYbPZqKioYMeOHWIIpicvh1qtJigoyCtp1hMCCg4O7rZKx+MJ6q1mixTWuYBISkpCr9ezefNm/vjHP/Zawt1DaGgoLpdLvMEefvhhPvroI699FAoFGo1GlC2WyWTMmzePf/3rX4SEhHhZtS6Xi6qqKqKiohg1ahSRkZFUVlaya9cucR9BEPj666/5/e9/3+csck8LcOj0orz77rt8+umnvPfee2RlZfGvf/3rlHksAwYMICoqyus9T3xz0qRJBAYGSpU7EhISPeIxTlpaWmhvb++xyWhPaDQaMYG1tbWVlStXEhgYSHJysljk4Ek+hf+vmqrX65kyZQqRkZFifpxKpRIFKceNG8fWrVtpbGykoaGBgoICcX5PSUmhrKyMxsZGIiMj+3S9nu
aoLpcLtVpNR0cHBw8exGKxEBwcTGtr6ykVa4OCgrr18Mvlcvr3709VVRXgrQzrKSPureCdZJxcQMTGxjJz5kw2b95MQ0NDn3s9ZGdn8+yzzxIUFAQgWuoGg4GxY8cyYcIE0tLSGDhwIHa7nZycHGQyGVdccUW3N6RCoRCTVO+66y52795NU1OTaAx0dHRw6NAhduzYwebNm5k8eXKfrvfAgQNdPDltbW289957jBgxguXLl58yOTg8PJyHHnpIbBoInSsZo9FIQEAAer1eMk4kJCR6JCMjg927d6PRaCguLmb48OF9+vzw4cPZt28f/fr1E6tSBEHg+PHjuN1uDAYD0dHRREVF4XK5KCsrQyaTMW3aNOLj47s95siRIwFITk4Whc48PcPa2tpwOp2Eh4ezf//+PhkngiCI56+trUWj0aDVapHJZOTn5zNgwADa2tpQKpWUlpb2eJz09HTKy8vJycmhvb0dg8GA0WikqqoKPz8/0QA7sZQ4ODhYDAX1Bsk4uYCQy+WMGzeO5cuXU1VV1WfjJDY2lptuugno9ETs37+f6dOn89xzzzF48OAusb6MjIxeHzs5OZnk5GQA7r77bqDTs9LU1MTevXu9ekX0BpvNxrJly7p1G5pMJkJDQ4mMjMThcHQb9zyRwMBAL6+NzWbD5XIRHBxMRkZGr0vXJCQkfnv4+fnh6+uLw+Ggurq6z8bJoEGDGDBgAEqlkpKSEqKjo2lqamL48OEMHTpUDH+cDsOHDyczMxO73S4+1D3hl4qKCmJjY/t0vIaGBqqrq8XCAYPBQGNjo2hEhISEUFNTg1arxd/fv8cmqmq1mpiYGHJycnC5XKJR5nA4SEpKQqlUepUS+/r6olAoaGpq6n1T1z59M4mzzqWXXoqfnx95eXldSrz6glwu5x//+Ieon3I2UCgUhIWFMWvWrD5/trS0lNWrV3e77ejRoxQXFyOXy39Rh2GZTHbWOzxLSEhc3CgUCtLS0ti0aRM+Pj5iCKIveOYZrVZLSEgI48aN67PhcLJjK5VKr2qg8PBwsrKy+nysw4cPY7PZxJySn4umFRcXY7PZSE9Pp7Cw0Ev/pLf83PgwGAwoFArq6ur69LtKCbEXGNHR0SQmJrJs2bJeu7+6Q6vVMnHixDNumHis41/KW2+91aNHRKFQ9DnueyIWi6VXbb8lJCQkAOLi4vDx8aGlpaXHfjK9wVNYcKYME+j0ULe3t/9igU6TySRWWkKn51yr1WIwGMQwjK+vL2azGb1e3+teQzKZjICAAGQyWZcWJ55KSk+jxb4YJ9Ky8gJDp9MxYsQIli5dSmFhYZ9CL6eLp3NkbW0tBw4cEOvW5XI5gwcPRqvVYjQa2bx5MwcPHiQ0NJRnnnmmz6sLD83NzaxatarH7RaLhdLSUgYOHMjevXtPebyWlhav3jsOh+OsC9lJSEj8evAIiQUFBZGbm9vnJNPe4slH8XTndTgclJeXU1JSIi5G5XI58fHxqFQqTCYTx48fF4Xibr755i6NUXvL8ePHxXkxMTGRgoICbDYbERERJCcnc/z4caxWK1qttlcGWnt7uxiy8fX1RSaTdenF4+fnJ1ZSeqp3eqtzIhknFyBXXXUVH374IWvWrDkrxokgCKxYsYKioiJsNhvbt2/HbDZz9OhRURLZg5+fHwqFAofDIWZZP/LII7/o/KtXrz6ppoDdbqexsbFLFU5P7N+/36tW3+Fw9NikUKvVYrVaUSqVuFyu32QnUgkJCW/0ej0xMTFiI75Jkyad8ZCw2Wxm8+bNFBYWit6QE0ttT8STjCqXywkMDBRzYvpawelBEARycnKAzqrQ5uZm8XgNDQ34+PiI4moqlapbyYmfH6+0tFS8Hk/DvxMXhZ5WAFartdt+O6dCMk4uQNLT0wkKCuK7777jnnvuOeMy7G63m9zcXF599dVTehh+vj0jI4M//elPp+018YiknagnkJKSwpAhQ3C5XOzcuZOamhrxXB5r+2QUFBR02cejkeIhMDCQp59+mjFjxpCbmyv2hnj++edP63tISEj8uoiLixPbYdTW1hITE3NGj2+328VS5aioKKqqqtBqtRQVFeHj44PZbBaVYX19fVEqlbS2thIZGSn21OltR9+fU19fT3V1NXK5HIfDIS7mPEUSFRUVJCQkYLfbCQoKorGx8aThe0/ZtNvtpqamRuzDZjKZxDyWwMBAGhoaCAkJISEhgerqavz8/MT5/VRIxskFSHBwMGFhYRw/fpy2trbTFjfrCYVCwZNPPsnkyZN54403xNLlkyGTycjOzuaDDz7o0gOnL+zatYu1a9cCEBAQwI033sjjjz9OZGSkaI0vX76cAQMG0NTU1CvjpDs3Yb9+/YDOkrzvvvuO4cOHk5WVhcvlYsCAAbjd7tNK9pKQkPh1EhISgslkws/Pj4aGhjNunAQEBDBz5kw2bNjAoUOHMJvNGAwG5HI5crmc6Ohorx40HmPl6NGjTJs2jaFDh572uXfu3IlMJmPQoEEcPHgQPz8/Jk+eTGJiIoIgkJ+fT15eHv369aO2tpaKiopee5U9Hhi5XI7BYBCbGQqCgNVqJS4uDqfTSVhYWJ9yFiXj5ALC7XZz7NgxPv/8c4qKihg+fDgGg+GsnEsmkzFhwgTGjRvHsWPH+PLLL9m0aRMbNmzwGjwymYzExETmzp3L//3f/+Hv73/a5xQEgf/973+i2M/ixYu57LLLRDef51wPP/wwAG+++Wav4pMnSxzOzs5GpVKxfv16Nm3a5LXtdBoVSkhI/HoQBAGj0Uhubi6HDh0iJiaGioqKM74g9KDX67niiitobGzk8OHDbNiwgba2NgIDA2lra0OlUuFwOETDwN/fn9mzZ5OcnHza3mqTycSxY8fQarWUl5czZMgQpkyZ4tW/LTs7m1GjRuF2u/nggw/w9/fvkj9yIp4y4e5CUiqViujoaIqKiggKCqKkpMRre2/zASXj5AKgsrJS7DD5008/ieWzl19++VnvrCuXy2lubmbz5s20trZ6xQOVSiX33HMPDz/8MBEREb/4XDU1NaxYsQKVSsUjjzzC5Zdf3u1+FouFbdu28d577wGdN+jJcm9OdhOFhoaKOTOSMSIhIeFpj5Gfn8+RI0eoqanBbrcTGRlJWVmZKGl/NgkJCUEQBAIDA8VOvQEBARQXFyMIAkFBQYwbN45+/fqddgKsh6NHj6LT6TCbzfj6+jJjxgwxH8SD1WqlsrKS3NxcmpqaGDJkCPv37+9RJM5jnHj+/+dzqydHJj4+nujoaHG/0tJSSb7+QsdisbBnzx4+/PBDNm7cSFVVlZeXIDY2lquuuuqcXEt0dDSDBg3ihx9+ICwsTHx/1KhRPPPMM2esHPmDDz6gurqahx9+mHvvvddrm91u59ixYyxZsoTVq1dTXFwslhpnZGT0WeTN0zvinXfe6dLkUEJC4reHpx3HgQMHOHLkCHa7HUEQUCqVYuO9pqYmJkyY0GOn3jNJVFQUR44cISQkBJfLRVFREdApp3/55ZefkbCS3W5nx44dZGZmUlBQwNy5c1Gr1WKn97q6OoqKijh8+
LCYBBsUFERFRQVut5vU1NRTnkOlUolNYS0WC01NTeTn5wOdpcQeWQiHw0FJSUmvve+ScXIOcTqdFBcX880337Bq1Sp27drV7ao/PDyc559/XsybONvExMTwwgsv8Ne//tUrUdUjbXwmyM/P59133wU6OyC/+OKL4jaj0cj27dvJy8vr4vKTyWTMmjXrpJNFd4PdYrGQl5fHp59+KlXkSEj8hqmvr+fIkSPk5ubS2toqrvJlMhkajYasrCyOHDlCeXk5qamponT82aZ///7Ex8dTWVnp9RwICgo6Y/kuO3fuxGazkZOTQ1tbG0uWLBG3uVwuOjo6xNCMr68vkZGR+Pv7k5eXJ+Y+dodcLvfy6nsW1i6Xiz179mA2m4mOjiYnJ8dr/pXJZL0W1pSMk3OAzWZj9erVfPTRR2zevNmrg+6JyGQyBg4cyD//+c8+96k5E/xS92FPmM1mHn30UTFL+/333+/V5xQKBQsWLOAPf/jDSfcbNWpUl8RZQRCorq6mvb2d2NhY5s2bx5AhQ8RtO3bs4NNPP5X0UCQkfoW4XC6qq6v56aefKC8vx+VyodVqxW66Wq2W1NRUiouLxQ7BGRkZzJw586yH0k9Ep9P1yjtxOlRWVrJt2zbsdrvoLTlRckEmk6FWq1Gr1YSGhmK326mpqaGkpERMmO0pBKNUKgkNDaWlpQWDweDVv6yhoYGEhASOHz+OXq8XK4xsNhvl5eVSQuyFgNlsZsWKFXzwwQfs2LFD/KOoVCrS09OpqanxUv274ooreO+99/rcU+dCxuFw8PLLL7Ny5cpef8bHx4cBAwZw7733Mm/evFOGlbpLFCspKWH27Nm88MIL3HDDDVitVr7++mtMJpOYjCbloEhI/LrwGCXr1q2joqICq9WKQqHAz88Pl8uF1WrFbDajVqsRBIHKykp8fHyYMWOGKF3wa6Curo6vvvoKh8NBeHg4GRkZHDhwAL1ej8lkwmAwUFtbi0KhwG63c/z4cWQyGTKZjNjYWGbNmtXr55CnpBg6m8EOGDCA48ePc+mll2IymSgoKBBz/lQqlaRzcj5pb2/n66+/ZsmSJWzcuBGXy4VOpyMzM5NRo0ZxxRVXkJ2dzZ/+9Cc+/vhjoNPF9/bbb/9qDBOHw8GOHTt4/fXXWbVqVZeqmxNbaSsUCoKCgsjKymLmzJlMnDiRxMREr2zyk+FZCZ3oGs3NzSU+Pl7sWPznP/9ZkrSXkPgVU1paKubvCYKARqMRdUM8lYCpqamsXr0avV5PeXk5CoWCSy+9lEGDBonHOZ3eOhcK9fX1bN++nfz8fIKCghgwYAA1NTVs2rRJrExyu900NDSI2iRqtZqwsDCSkpJITk4mKSmpS5PY7vCUCwuCgF6vx2w2Y7VaSU1NZfTo0axevZr8/Hwx50+pVDJ27Fgv783JkIyTM4jFYmH37t384x//EI0StVrN0KFDef7558nOzha9AIcPHxb1PnQ6Hc8991wXRdQffviB8ePH9/ohfaFw8OBBnnrqKdasWSMaDFqtFpVKhcViwel0MmzYMK655hrsdjvjx48nKSmJ8PDw03KpJiQkEBAQ4GWcxMbGolAoyMvL4/7775cMEwmJXymlpaXs37+f/Px8sYu5Xq/HbreTmZlJZmYmERERaLVa1qxZg9PpZPTo0WzdupXo6GgyMzOx2WxoNBqsVisbNmxg8ODBZ1zn5GzidDrZvn07W7duRa1Wk5KSQnh4ODt37sRisSAIgij0VldXR2BgIAMGDCAuLo6EhATCw8N7ZZCcSFRUFHv27MFgMJCamsq+fftQqVTo9XpycnI4cuQIcrkcPz8/0XNVUFAgKo2fCsk4OQN4MqKfeOIJ9uzZg8ViQafTMW3aNO69916ysrK8DIy6ujoWLFhAbW0tcrmcxx57jNmzZ9Pa2srnn3/O7bffjkwmY/HixVRVVXH77befx2/Xe9xuN8uWLePxxx8XS/KmT5/O1q1biYmJ4aqrriIqKoqWlhY2bdrEm2++SUZGBgkJCaSnp5/RWO+sWbNobm5mwYIFVFRUdNk+YcIEYmJi+PTTT8/YOSUkJM4Nbrebqqoq9u7dK1be2O12dDodgYGBpKWlMXjwYJKTk8UwwuHDh8nPz2fYsGHs378fmUzG1KlTKS8v59ChQ0ycOFH0tOzYsYM5c+ZcFF3NrVYrX331FWVlZaSmplJSUkJ+fr4YviouLsbtdqNUKklISMBgMODn54fNZkOn0xEcHNxnw+REVCqV2FsnLCxMlMZwOp2MHDmSpKQktm/fTn19PWazWUqIPRd4lPXuv/9+tmzZgsViQaVSceWVV3L//feLAmAnYjabue222zh48CAAAwYM4O6770apVFJdXc13330nGiNTpkzhb3/7G6NGjWLw4MHn/Pv1BbfbzX//+1/+/Oc/iyXAcrmchx56iDFjxvD000/zwgsvoNfriYiI4Oqrr+aKK65g+fLl3HbbbURGRnLbbbcxf/58+vfv/4uuxZOs9dlnn7F///5u99m6detp96mQkJA4f3i8GwcPHsRms+FyuXC5XBgMBgYMGEB2dnaX8Pjx48f54YcfsFgs4up93rx5YqO/kJAQLBYLISEhVFdX09zczE8//cSUKVPO07fsHVarlS+//JLGxkbCw8Ox2Ww4HA5iYmKwWCwYDAb69esnysobjUbUajUymQyDwcCGDRvYunUrgwYNYsyYMb3WIPk5nsICrVbL9u3bcblcxMfHU1RURE5ODmq1muTkZBwOBxqNhjVr1pzymNLsfJo0Njby8ssvc8kll/Djjz9it9uZPXs2y5cv54svvmDixIldDJP6+nruuOMO8Q8TFhbGa6+9Jsqo5+TkeIUmZs+ejdvt5q233vLqunsycnNz2bRpE2VlZb3Oiv6l/NwwSUlJITs7G7vdzpNPPsktt9wiVsp41ApffvllNmzYwJIlS7jnnnuorq7miSeeYOLEifz973/vsXGfhxNLnvV6PcnJyeJrf39/4uPjWbx4sVcFj0qlIjY2loSEBAYPHtzrxoISEhIXBvn5+Xz00Ufs2bMHpVKJTCbDx8eH9PR0br31VubMmdPFMCksLOR///sfQUFBaDQaOjo6GDZsGGlpaUBnON4jmaBUKhk4cCAqlYri4uJTtvWATs/5hg0bWLFiBVu2bKG6uhqn03nWJQw8HpOSkhISExMpLi4WF73V1dXExMRQVVWFRqMhODgYf39//Pz88PHxIS0tjYaGBiwWCx0dHWzdupV///vf5Obm9qgLZbVaqa2tFb+bR+Cyo6OD/Px8r+72iYmJ1NTUiI1kzWYzBQUF1NXVSb11ziZFRUUsWLCA3bt3IwgCKSkpPPHEE1x99dU9NmYqLCzknnvu4ccffwQ6kzifeuoppk6dKu5jtVq9Ekejo6O56aabePXVVxk5ciS33nqrKBzUnf6Iw+HgoYceYuPGjfj6+jJkyBDGjRtHVlYWiYmJotqfXC4/Y2XDDoeDjz/+mHvvvVf0mISEhHDttdeyfft2du3axebNm7n33nu59dZbRaNi8uTJVFRU8Mwzz/DGG2/g7+/P
q6++Sn19PU888QSff/45119/PXfffbeXhL8gCLz//vv873//Y9SoUYwZM4YJEyZw/fXXs337dvH3q66upqioCIVCQWZmJmPHjmXGjBlERERQX1/PoUOHMJlM/P3vfz8jv4OEhMTZw+l0snv3bjZs2ACAWq3GYrEQFxfH1KlTu80PcbvdbN++nR07dpCYmIi/vz+NjY3079+fqVOniqEMTy8bD4MHD2bfvn1ERUWxZs0a5s+fT0NDA4IgEBsb2+U89fX1HD58mPT0dIqKiti0aRO+vr6EhYUREhJCdHQ0MplMlHX3CL/9EnHL+vp6fvzxR4qLi0lISCA3N1c8h06nQyaTUVtbS1RUFBUVFaSnp1NYWIjRaESr1VJRUcFll13GunXraG9vJyMjg/r6er788kuCgoKYPn26V0jMYrGwYsUKamtr8fHxITg4mMGDBxMeHk5TUxODBw9mz549VFRUEB4eTn19PQ6HQ9RP0Wg0NDU1iX17eoNMuAgVqtra2vD396e1tfWs9Z7piT179nDjjTdSWFiIwWDgjjvu4J577jlp8tTu3bu59tprxRbT48aN48UXX2TkyJFeWeEvvfQSq1atYt26deKgaGpqYuLEiVRUVPDll19SU1PD0qVL+fvf/05WVlaXrPK8vDwWLlzIgQMHRENALpej0+nEfhG+vr5MmDABi8VCUFAQw4YNIzg4mEGDBhERESE2ceoJT/fJ9evXi4quJ3p2rrvuOmbNmsWCBQsAmDNnDv/5z38YN24cR44cAWD+/PkMHjyYFStW8OWXX5KQkMDq1at56KGHyMvLE6972LBh/O1vf2Pq1KmoVCpKSkoYO3YsSqWSmJgYDh06xKBBg/jggw+46qqrKC4uJisri7Vr17J582bcbjeJiYksXryYXbt2UVRUREtLi9hgCzin4+h8jl2JXw/nYxydr7HrcDj47rvvKCoqwt/fn7q6OgICAhg3bhxDhgzpNl9CEAQ2bdrEjh07iI6Oxm6309DQwNChQ5k4caK4iHQ6nSxZsgS1Ws2ll14qytbn5uaya9cuEhISaGlpQaVSiSJt48eP91qECoLA7t27OXLkCG63G4VCQWNjI1arFZlMJnqwPV4eT/6Hv78/er2exMREoqOj0ev1XX5XuVwuzlVGoxGj0SgaAXa7HV9fX9RqNa2trWg0GgRBQK1Wk5SURG5uLpdccgnr16/Hx8cHQRDo6OggMDAQjUbD8OHDSU9PZ926deTn5zNo0CAsFouYyDpw4EDGjx9PWFgYW7du5dixY0CnfkpSUpLY8K+kpITIyEhqamq47LLLxIa1arWasrIy/P39CQ0NFWX6N2zYwDPPPHPKcfSLPCfPP/88jz32GPfeey+vv/460Ln6f/DBB1m2bBk2m43p06d3KZEtLy/nzjvvZOPGjej1em666Saee+65Cz75aNWqVfzpT3+itLSUrKws3n333R5vDg9Go5E///nPlJaWEh0dzV133cXdd9/dbQVOYGAgRUVFNDQ0iL9XcHAwTz75JAsWLODuu+9myZIlZGZmMmfOHG644QYeeughQkNDxWOkp6ezdu1aNm7cyJ///GdRFr+jo8MrS9pjAHhQKpUEBgaSnJxMYmIiWVlZmM1mHA4HCoWCpKQkioqKEASBvXv3UlZWRkFBgVd4BWDYsGE888wzrFq1Snyvrq4OvV7PjBkzRONk//79vPPOO9x3332iF2fmzJmkpaXxyCOP8OWXX+J2u9m7dy/z5s1jxowZPPHEExw+fJiGhgb8/Px4/fXX+frrr3nllVf49NNPmTRpEsXFxVgsFmQyGdOnT2fZsmVceeWVlJWV4evrS2pqKrfeeiv79+9n3bp1Xtf+ax67Er8ePPPuifxax67VamXt2rXk5eWJImKjRo1i7NixJ61iLCwsZMeOHahUKqqrq/H39+faa6/1Cv9Cp9CjzWbj2LFj9O/fXzRO+vfvL4Y4iouLxTy4Xbt2UVhYyKRJk+jfv78YWho5ciT+/v7s3buX2tpacTGYlpaGTCbDbreze/dubDab6M3weJoLCwvF0mc/Pz+cTqeYmOvv7y+W3prNZi91W61Wi8FgoKGhAblcTnZ2Nrm5ubhcLvz9/XE6nYSHh6PX65HL5aSmprJnzx4yMjKIjY0lOTkZpVIpSuVv2LABu91OUFAQNpuN3NxcCgoKGDVqFHl5eeICNDw8HKfTSXJysthMtbi4GB8fH3x9fZk8eTIrV66krKyM8ePH09jYSH19PVarlaqqKiorK3v1tz/tUblnzx7ee++9Loma999/P99//z3Lly/H39+fu+++m7lz57Jt2zagM1fgsssuIyIigu3bt1NTU8PChQtRqVQ8++yzp3s5Z5X29nbeffddXnzxRZqbm1m4cCF///vfiYuLO+VnFy9ezN69e5kwYQL//Oc/xdyL7hg1ahTNzc3s2bOH2bNni+9feeWVXH/99SxevJhbb72VlStXMmzYMB588EFWrlzJ3XffzbXXXktgYCAymYzAwEBCQ0NJTk6mqqoKPz8/fH19aWxs7FF4zOl00tDQQENDAzt37uSzzz7z2v5zBdbuGDZsGP/+979JTEz0UsH1uBmvv/56/v3vf9Pe3o7T6UStVncJLyUlJbF48WICAwP5+OOPsVqtWCwWvvrqK7Zu3cqdd97J3LlzWb58Oc899xzvvPMOOTk55Ofnk52dDXTGpdevX09hYSF//etfcTqd3HLLLSxatIjY2Fi2b9/O0qVLiYmJ8bpRfo1jV+LXhWfezcjIIDc3V3z/1zh2S0tLWbNmDbW1tQQEBOB0OpkyZcpJ51DozAHZtGkT/fr1o6ioiDFjxvSY7OnRP2loaODo0aNkZWUBnYu1qVOnsnjxYsxmMwcPHmTkyJE0NzfT0NDA2rVr2bFjB6NHjyYxMRG9Xk9SUhKVlZWUl5djMBioqKgQG93pdDoiIiKwWq3I5XIxlKTT6TCZTKhUKnGObWlpISwsDKPRSFtbmxiqlsvlyGQyFAoFgiDQ1taG0+nE5XIxceJERowYwf79+/Hx8cHpdIrh+/T0dPbv3y+GkSIiIsR8G+g00LKysoiKiuLzzz+npaWFIUOG4HA4KCgoEIs9fHx8SE1Npbq6mlGjRrF161ZxcRoREYHD4WDr1q1iTopSqWTXrl0MHTqUfv36YTabqaysFPvunIrTSog1mUzccMMNvP/++2IyJ3S6xz/88ENeffVVJk+ezPDhw1m8eDHbt29n586dAPz444/k5eWxdOlShgwZwsyZM/n73//OW2+9hd1uP53LOascOHCAuXPn8thjj2GxWHj99dd55513emWYmEwmfvzxR5566im+/fbbLjfVz70OERERhISE8N1333nlnqjVap599lmmTJnCkSNHuPbaa+nfvz9r164lMzOTRx99lFGjRnHPPfewdu1ampubOXjwIPv27QM681seeOABli1bxrPPPktUVBRBQUEMHz4cX1/fXv0OvTFMPvjgA4YPHw7g5U2aOXMmCoWCjIwMMjMzgc7GfMuXL+/2WHq9nrfeeovXXnvNy33a0ND
AK6+8wpYtWwD49ttvefvtt7niiis4duwYAwYMEN2gTzzxBC+88AJOp5OgoCCxb9ATTzzB7NmzaWxs9DJMfo1jV+LXxYnzrqeZGvz6xq7b7Wbz5s18+umnYmK/Xq/nhhtuOKVhAlBRUUFISAhGo5FrrrmGiRMnehkmP1+khYeH4+vrS1NTk9j8Djq91rNnz0ar1aJUKiksLCQsLIyBAwciCAJms5m1a9fy/vvv88UXX3Do0CEaGxtRKpVibkp9fb1YKVNUVCQaLIAYZpHJZKKHIz4+nrCwMNRqNZGRkYSGhqLT6fD19aVfv34MHjyYlJQUUXoeOmURJkyYgEKhQCaTiecKCgoiNDSUjIwMnE4nHR0dhIaGcuDAgW6TXg0GA1dffTUJCQns37+flpYWpk+fTmJiIjqdDrfbTWlpKQqFgn379qHVaomNjUUul2Oz2RgwYAAVFRXU1dUREhKC3W5n5MiRqFQq1q5dy+rVq8X+Rr3htIyTu+66i8suu4xLL73U6/2cnBwcDofX+/379ycuLk7sX7Bjxw4GDRrk5W6cPn06bW1tosv/59hsNtra2rz+nQvWrVvHvHnzWLduHdHR0bzzzjv86U9/6jHp9efo9XqWLVvGX/7yF6/mdIIgcPDgQR599FGvXI2QkBAxRvjzni8REREsXbqUGTNmkJOTw1VXXcXhw4f56KOPWLt2LSNGjOC///0vs2bNYvTo0TgcDt577z0CAwMpKyvjkUce4aGHHqK9vZ2goCAMBgNPPfUUW7Zs4bPPPuMvf/kLc+bMwd/fn/79+5OWloZcLqdfv34ndfsGBweLgmtDhw4V309ISAA6mwdOmDAB6DSyPO5Rq9XKX//6VzZt2uQlkHbw4EHuvPNOnn76aWbNmsXbb7/tZQh2dHRQW1srvn755ZcpKChgxIgRYu4MwJEjR0R36KxZsygpKWHGjBm89tprOByOLpVMv7axK/Hr47cw7zqdTr755ht++ukn5HK5WIlz/fXXiyGXUxEfH8+AAQO48cYbSU5OFvPyPKrVO3fu9ErK9Oxjt9u7JGv279+fq666SvR+lJeX09jYyJVXXikmjIaHhxMcHMyBAwfECpjc3Fyys7PJzMykf//+XrmB/v7+ZGZmEh4ejp+fnygjYTQaycnJwWg0UlZWRlVVFRaLhbCwMCIjIwkKCqKkpISjR4/icrkIDg5m7ty5TJw4EblcLibDNjc309TURFxcnBiu12g0FBUVIZfLKS4u5vvvv2f37t1iCGnTpk28/fbbfPvtt6IwW1VVFT/++CMul4vExESUSiUOhwO3201rayuNjY0kJiYyZswYsZLHYDAQFRVFfn4+crmcmpoaNm/eTFtbG0FBQWRkZJCRkdGrv2OfjZNly5axb98+nnvuuS7bamtrUavVXlY9dFqmngdKbW1tl1Ivz+sTHzon8txzz+Hv7y/+6y5j+kyzadMmfv/734v5JStWrGDBggV91sbwhFqgc0VQU1PD4sWLmT17NsOHD/equpHJZPTv35+ysjLWr1/f5VgtLS1MnjyZ++67j+bmZm677TbuuOMOoqKi+Pjjj1m7di3Z2dmUlJSwaNEiL+MnKSmJjo4OnnvuOXJzcyktLWXBggUsWbKEI0eOEB0dzbvvvsukSZNIS0vjiSeeQKVS9RgKkslkDB8+nC+++IInn3xSTLb14PnMiBEjvAbjrFmzRK9KdXU1M2fOZOTIkXz88cd8+eWXXHnllbz77rs899xzPPnkkyxcuFA0Ek8szQ4LC+O+++7D5XLxzjvvoFarxZyZE0lPT2fcuHEsXLiQqqqqHv9Ov6axK/Hr47cw75rNZr7//nvq6upwu904HA7GjRvH/Pnz+1TZolQqSU9PFxeRZrOZ3bt38+mnn3Lo0CFUKpXXgsfHxweVSkViYiIHDhzwOpYnzGI0GqmpqSEqKoqQkBC+//57YmNjueGGGzAYDBQVFTF48GDGjx9PTEwMvr6+WK1WGhsb0Wq1OBwO4uPjmTZtGk6nk2PHjokiaSNGjECv16NWq8XeM57Qj16vJzAwkJKSEnbu3InJZEKpVDJs2DBuv/120tPTvZ5JgiAwbtw4dDqd6GXS6/VERUXR3t5OZGQkgYGBHD58mFWrVrF06VI++eQTtm7dSkREBIIgUFtby/Tp05k8eTJyuZyKigr8/f0xGAy43W7q6+uRyWQEBARw+PBhoqKiUCgUNDc3I5fLaWpqEsNNxcXFhISEMHToUCIjI7s0HzwZfXrSVlRUcO+99/LJJ590W8p6tnjsscdobW0V//W2FOl0EASBlStX8rvf/Q6j0chf//pXNmzYIMYifwk7d+5kwoQJLFq0iAULFjBv3rwu+1xzzTUYDAY+++wzL6PAZDKxYMECHn30UeLi4vjmm28YO3Ys//3vf5k9ezZr1qwhIyODVatWsWzZMsaOHUtzc7Oom5KcnMzYsWMZNmwYCQkJKJVKWlpaeP311/nHP/7BPffcw9ChQ/n+++/54Ycf+MMf/oDNZuvSQVmtVpORkcHrr7/OmjVruu2e7HQ6WbFiBdC58jhxYpkyZYqXyJonSeqmm25i/vz5lJWVoVAouOOOO3j11VeRyWSkpqaydOlSHnvsMTEJrrGxkbi4OBYuXIjL5UIQBHQ6Ha+//jpz5swhOTmZIUOGkJWVxVNPPeXlnVEoFCiVynMiwnYux67Er5PfwrzrKVVVKBQkJycTERHB7373OyZMmNBFL6qv7Ny5k6qqKvH6x4wZ4xV2ViqVZGZm0t7eTlNTk5eC6bFjx9i6dSuhoaHo9Xra2tooKipi9OjR7N+/n59++onBgwczffp0CgsL2bJlC4GBgYSEhFBTU4NcLufYsWMIgkDF/2vvzKOjKLP+/0nSnc6eTiArWxIgbNlI2BLZZRUlKExQBFEUB0RFEOUMOCLvK7KIzigMjPoTEEYElFEgBEJkTQgQCISQhaxkhexk7U6v9fuD0/USCRAgCsH6nMM52l3pqqfqVtV97nPv9xYWEhsbK0Y3QkNDGTZsGEVFRWJ/ML1ej16vR6lUEhwcTGlpqahdAogClqblppspKyvj+vXrlJSUIAgCzs7O4nf9+/fH3NxcFKAzaZIUFBRQUlKCXC4Xo8khISG4u7szePBgZs6ciZubGxcvXkSv1+Po6IhCoRAnp42NjXTu3JmwsDD0ej01NTXU1dXR2NiIQqEgICCAhoYGkpKSSEtLIz09/fdJiE1MTKSsrIzg4GDxM4PBwIkTJ1i/fj3R0dFotVqqq6ubePGlpaViSM7d3Z2EhIQmv1taWip+1xwKheK+levuBZ1Ox+7du3nnnXfQarX861//Ytq0aQ8k7Xsz/fv3Z/fu3VhZWYlhst/SpUsXBg4cyNGjRykpKRFLlLVaLUVFRRgMBr744gvi4+PZtWsX33zzDRs3bmTKlCkMHz6cVatW8dxzz/H0009z8eJF1q9fz9GjRzl27BharZaXX36Zjz76iMLCQrZv386RI0fw9fWltrYWQRDo3LkzFy5cuKX/gVwuJyAggA8//JCRI0feUSclJydHvAmnT5
/e5DsHBwcWLVrEggULmnjQpgZSJsfk008/beLUKBQKPvzwQ4KDg3n11VeprKxk5cqVLFq0CAcHB86fP49Go2HgwIH8+OOPqNVqTp8+zYQJE9DpdLi4uFBeXt4ksdeU1+Ps7NzmbVfi8eV2z114PGy3oqKC/fv34+zsjI2NDcXFxUybNq3VeooFBASQnJxMWFgYffr0abZ8tUePHiQmJqJUKsnKyhIjwXV1ddTV1REYGEhKSgrdunWjf//+REVFiVorUVFR+Pr6Eh4eTnV1NRcvXiQzMxOj0UifPn1wdXXl7NmzdOzYEYPBICbJ3qxSrdfrsba2pkuXLshkMmpqakhLSxOfid26dSMkJARvb+/bOmupqaliPkr37t2bPKO9vb3x9fUlPz+fxsZG2rdvj7+/P+fOnSMoKIiUlBRUKhVTp07F1dVV/Dt3d3emTZtGTEwMmZmZBAYGkpmZSVFRkaiZUlFRwciRIwkMDKSsrIzo6GhsbW0pKysjKSkJQRCwtbWlc+fOWFpailVKd+Oepo5PPvkkly5dIikpSfzXr18/XnzxRfG/5XJ5kyWJjIwMCgoKCA0NBW54rZcuXaKsrEzcJiYmBgcHB3r37n0vh9NqCIJAfn4+S5cu5ZVXXsFgMLB161amT5/exDHJzs4Wb+j7wfSC9/X1va2BWVhYiH1hTMlscCM0161bNwByc3P59NNPsbW15d133yU+Pp4PPviAM2fOMHr0aBYuXMilS5fEPJTz588TGRnJhAkTxKhQYWEhy5Yt4+TJk/z3v//l0KFDbNu2jYULFzbJj1EoFIwYMYKffvqJX3/9lYkTJ4pGfzsVxD179lBVVUVISIh43W/mpZde4sCBAwwePFj8zMzMDC8vL5YsWXKLY3LzuQkPD2f37t14enpSVlbG8uXLUavVZGVlcebMGXE7Ozs7jEYj5ubmfPDBB/znP//hu+++48KFC1y8eJGkpCSxE2lcXFybtV2Jx5/mnrum/K62bLv19fUkJCSwadMmKioqMDMzo6GhgWeffVZ0TARB4MKFCyQnJ9934m779u0ZOXIkI0eOvG3XdxcXFxQKBTY2NqJkgulzCwsL5HI5NjY2HDt2DGdnZ2bPnk1wcDCNjY20a9eO4uJiNm/eTFpaGsOHD2fOnDmMGzcOnU7H2bNnEQSBU6dO0dDQwMCBAxk6dCgvvPCC2IHdpFUCN57vlZWV6HQ6fH19iYiI4Pnnn8fX1xegSRmyCYPBQEpKCn369KGwsFAsTDChUCiYNGkSgYGByGQyqqqqxEloXFwcMpnsFsfEhIODAxMnTqR79+5cunSJHj164OnpSW5uLiqVShSAa9++Pb1798bDwwNLS0s0Gg2Ojo64u7vz0ksvERERwbhx4/Dw8GjRdXtgEbbhw4cTFBQk6pzMnTuXqKgotmzZgoODA2+99RYA8fHxwI2TGBQUhKenJ2vWrKGkpIQZM2bw2muvtbikrbXEgHQ6HZcuXWLv3r1s2bKF/Px82rdvL+aE/Javv/6aL774gtmzZ/P666+3ODH2Xjl//jxDhgzhxRdf5OuvvxY/f++991i7di1ww1k5fPgwAwYMAG7cxImJiaxYsYI9e/bg7OzMyy+/zJw5c8SEL51OR15eHnv27OHbb79Fp9PRs2dP/Pz8OHPmDOnp6VRWViIIAh07diQiIoKJEycSEhIiOgsqlYr09HSOHz9OfHw8q1atEp0muOGwPPPMMxw7doytW7fyl7/85bbjrKioYMaMGRw8eJB27drx5Zdf4unpiZmZGQqFgt69e9/2+sbExPDaa69RUFAgftavXz/27NkjytLHxsYyZcoUsaSwX79+7Nq1i7KyMv71r3+xbt06NBqNaEdtyXYl/twMGTKEuLi4Nmm7paWlnD17loKCAqqqqvD39yc7O5vu3bvz1FNPNYko63Q6Dh8+TFpaGg4ODjz55JN06dLld1mSjYqKoqamhuLiYubMmYOdnR1qtZoNGzagUqnE8t+goCDx/aBWqzlx4gT5+fn4+flRU1NDdnY2QUFBBAQE4ODgQGlpKZcvXyY5OZnq6mqxE/K1a9dITU1FqVTi4+NDaWkpOTk52Nvb4+PjQ0hICJ06dRKrZAoLC0WRN7lcLkonwI28oR07dtC3b1+uXr1KRETEbcXpsrOz2b17N4IgoNVq0ev1+Pj4iIJuPXr0oEuXLre833Q6HXv37hUF2/Lz88Uk6fDwcHGyFxkZyeXLl6mrq8Pb25va2lqeeeYZysvLiY2NJSEhgV27dt3VjlrdOTGJAf3www9NxIBuDh3m5+czd+5cUeZ35syZrFq1qsViQA9ykxgMBoqKijh79ixbt27l119/FZX8XnzxRd566y369+/f7N/q9Xo+/fRTPv74Y/r27Ss25WstKXgTarWa0NBQampqOHXqlHjufvnlF6ZMmYLBYEAmk7FgwQLWrFnT5G/r6upYt24dn3/+OZWVlbi6ujJu3DgmTZpESEgInp6eoud87tw5ysrKiI2NpaysTCwLHzFiBOPGjcPFxQWNRkNxcTEZGRlkZGTw/fffYzQamT59Os8880wTxwRuGObUqVMZO3YsO3fuvOt6cUpKCuHh4eTm5jZ54MhkMnx8fBg/fjwLFixoNhlv3759zJo1q8ka8WuvvcYXX3yBjY0NKSkpjBgxgoqKCkaMGMF3331HbGwsS5cuFcv54P8UYh9125WQMPFb5+RRt92qqipSU1O5cuUKxcXFKJVK7OzsqKioEJc/Ro0a1eyx1NbW8ssvv6BUKikuLsbd3R0/Pz+6dOnSqp3Ms7OzOXDgAA4ODvj7+4vLaP/5z38oLy9HoVBga2uLRqNh5syZTZa8ioqKOHDgAC4uLnh6eorjdHFxwdvbm27duokim1euXMHS0hILCwtSU1PF94+rqyv+/v50796d69evk5eXx/Xr17GysuLatWv079+fkpISHB0d6du3r3iuBEHgp59+wsHBgczMTCIiIm4bITIRFxfHiRMncHV1paysTNRFMTc3x8LCAltbW4YPH35LZY1OpyMqKork5GQsLCwYPHiwKHZnqqg6fPgwxcXFXL9+HXt7e/z8/MjIyODKlSsolUqys7NZt27d7++cPAzu5yYxGAwkJSXxxRdfEBkZSW1tLQaDAWtrawYOHMizzz7LrFmz7upo3OygGI1GevTowYsvvsgLL7yAm5vbAydvwQ1je/vtt9m4cSPbtm3jhRdeAG4kPJkk67du3YqXl5fYL+dmjEYjGRkZ7Ny5k8jISC5evIggCCiVSgYMGMCrr76Kj48P3t7eTc6PhYWFKJSTl5dHYmIiJ0+eJCUlhXbt2jFs2DAGDx5MeHi4WLJ7M2q1mqeeeoqMjAwOHz5Mr169WjTeXbt2MXPmzGabG7Zr147t27czZsyYZv82MTGR119/XdR0MTc3JyIigtWrV9OpUyex1HrNmjWcOnWKGTNmoNFoUCgUeHt7c/ny5T+FBLjE40Vbka+vq6sjLi6OrKws3N3dcXFxoaysjIyMDOzs7LCxsSE8PPyuof7a2lr27t0rvnSzs7MxMzMjK
CiILl26tHip4E7U19fzzTff4Ofnx9WrV8XqzMTERJKSkmhoaMDW1hZ/f3/69+9/S+sQlUrFmTNnuHTpEubm5mLl4PXr1ykrK8PZ2VnstWPSSVGr1SgUCjGCW1dXR2lpqRipsbCwYNiwYZSWluLn50f37t1veccUFBSwZ88e0TEySTfcCYPBwM6dOykpKcHZ2Rl7e3u0Wi1ubm5kZmZibW0t6rz8Fp1Ox9GjR0lISMDa2hp7e3vMzc2pra1lxIgReHl5sWfPHgoLCxk+fDh5eXk4Ojpy4cIF5HI5Fy9e/GMiJw+De71JKioqWLNmDf/+979paGigY8eODB8+HFtbW1544QUGDRp0T06FwWDg8OHDLFu2jMTERPR6PS4uLoSEhLBgwQKGDBkiZjTfL6dPn+bJJ58kLCyMAwcOIJPJ0Ov1TJo0iSNHjnDixIm7VhCZRH7i4+PZv38/p0+fJjs7m7q6OuRyeRMBPRM6nU5UJfTx8WH48OEMHz6cJ554QmxgdTvWr1/P+++/z+eff86cOXNaPNaamhoCAgLEJRqZTEa3bt2YOnUqffr0QSaTERoaipubW7P7z87OZubMmZw7dw5BEDAYDPTq1YshQ4bQs2dPSkpKRIfJzs6O+fPnExYWhre3Nx07dnzkH/ASEr/lUXdOBEEgPT2dEydO0LFjR1Gj49q1a5iZmdGtWzf69euHh4dHiwsO6urq+PXXXyktLaVnz54oFApycnK4fv06Hh4e9O3bF09PzwdqqBcZGQncSOqPiIjAw8OD0tJSfvjhB7y9vbGwsGj2hf3b48zKyiIzM5OrV6+KZd5VVVUoFApRTt9UbmxKLFUqlajVajGS4ubmhr+/Pz169MDe3r7ZZ5+p8aq9vT319fVMnz69xe+y5ORk9u7di7W1NZ07d+by5cvo9XpRxdZU8ejv799sYnRiYiIHDhwQpfeDg4PJz88XOxHX1tZiZWVF7969sbS0FFu45ObmsmjRoj+3c2LqAzN37lwuXrxIz549+etf/8pzzz0n5iU8CPX19Zw9e5Y9e/Zw8OBBsrKyRKGx8ePHi70c7mfZp66ujv79+1NUVERMTIyY2BYdHU14eDhr1qzh7bffvuNv5OTksHbtWpYtWyZKJxcXF1NQUMDZs2cxGo2kpaVRWFiIq6urmGRnZ2fHwIED8fHxaTZC0hzZ2dmMHz+e3r17s3PnznsqeTQYDGzatEks+R04cCBvvvkmmzZtIjExkYaGBjw8PPjoo49Eye3fYirxEwSB1NRUfvjhB3JycsjJyWmStDt37lxWr16Nvb39I/+Al5C4HY+y7TY2NhIXF0dBQQHdunUjPT1dzGXw8vJCqVTed76e0WjkypUrXLhwgaKiIuzt7QkMDKS+vl6U8zf1B+vcufM97yc9PZ3Y2Fi8vLzQaDQ888wz4rKJWq2mtraWefPm3XGSFhcXR1lZGX369KFz586UlpaSn59PWVkZV69eRRAEsZmfmZmZuJytUCjw9PTEx8eHrl27tujYExISuHjxIhqNhkmTJt2xAe1vqaurY//+/WRkZODi4kJNTQ0ajQZLS0t0Op14fKbl9d9qWcGN5356ejpGo5GqqipKS0uRyWQ4ODigVqtpaGjA2tqa2tpawsPD8fHxISoqihdeeOHP65yoVCo2bNjAqlWr0Gq1LFiwgEWLFmFvb8+ZM2f49ttv+fDDD+/pYt6J69evs3PnTtavXy8q+Nna2tKpUyfGjBkjJpY6Ojo2a9hVVVUcPnyYYcOGiRnTa9euZfHixQwZMoR9+/aJ3vGIESNYuXLlLUqRv6WyslKUNv7HP/7RrMNgKuE1MzO770hPbW0t48ePJy8vj5iYmCbZ/9XV1dTW1tKxY8e7JrHl5OQQGRnJtm3byMnJuUWsR6FQ8Pzzz7N8+fJml7NuxnSzLFq0iP/85z9i6aVcLqd79+6EhITg4+PD8uXLH8kHvITEnXhUnZOSkhIiIyNxd3fHxsaG/Px8wsLC8PLy4vjx4+Tk5DBs2LBWqRBSqVSiOJmZmRl9+vTBycmJw4cPo1arEQRBVIvt2rVrsxGVtLQ00tLS6NatmyhLv3nzZsLCwsTotY+PD5mZmZw+fRo7Ozuee+65Ox5XSkoKBw8exNHRkUmTJjVpzGqSL6irq0OlUmFlZSVWR97rMzgrK4tjx46JImvDhg0T95GRkUFlZaWYlHs7dDodZ86cISUlhcbGRuzt7Wnfvj06nY6ysjLRuVCr1YwePZrAwMDbHqPRaKSiooKSkhKOHDlCY2OjmJOYmpoqRlgyMzPZsmXLn9M5ycnJYcmSJfz000+4urry1Vdf8fTTT4svx6NHjxIeHk5wcDBbt25tUZ+cllJVVcXJkydZt24dx48fF8vf5HI5Pj4+PPfcc0ydOrXJPk3NnMaMGUNFRQXz58/npZdeAmDixInExsayfPlyFi9ejFwuF1tUtyR8uWnTJt5++23WrVvHyy+//EBLTc1hNBpZv3497733Hl988UWT5ZyrV6/y3HPPkZOTw5QpU/joo4/umqgFN2ZeZ86cYd++fXz77be3OClBQUEsXrwYT09P+vXrd8cZRn19PUuWLGHdunW33eZRe8BLSNyNR8050ev1nD17llOnTuHp6Skua48cOVLcdt++fWi1WlQqFT179iQ4OLhVNKT0er1YDVNRUYFcLhd1mgwGA2q1GicnJ3x9fZvkppiZmaHRaDhx4gQWFhbY29szePBgZDIZR44cISwsjLi4OKZMmYKTkxMJCQl4eXndNb9Fp9OJjVNlMhmTJ09udb0YtVrNjz/+iI2NDfX19bzwwgviPi5cuEB0dDQajQZbW1ueeeaZJo3+mkOv11NQUEBKSgpZWVmoVCqMRiN2dnY4ODhQUlKChYUFnp6e2NraMmTIkDu2EygqKmL79u1otVo8PDwoKioSJ8FpaWl/vpwTQRCIi4tj9uzZZGZmis7Hb710g8HAd999x1tvvYW7uzsbN25kxIgRD5zMavIc9Xo99vb2nD59mh9//JGtW7c2abRka2vbREvEzMwMPz8/Ll26xNWrV5HJZPTs2ZMVK1aICbepqamsWbOGefPm3VMZXW1tLSNHjiQ/P59Dhw416X/zoNTW1rJ+/XpWrFhBcHAw+/fvF69HSUkJL730EjExMeL2o0ePZtu2bS1yUODG+bx06RLbtm2juroajUZDTEwMpaWlYsvwLVu2EBERccffKS0tZe7cufz888/Nfv+oPOAlJFrKo+ScmBqcFhYW4ubmRnFxMf369SMsLKzJM7W+vp6dO3fi4uJCbW0tdnZ2PPHEE00iC/d7XNnZ2WJZ7OnTpykvL8fT05OuXbuK8v75+fm3tOKwsLDAaDSKOX3m5ub06NGDdu3aUVVVRceOHcnKymLq1Kn3tFSdmZnJ8ePHcXZ2RqlUMnLkyFaZGJqWtY4cOSJ2ph45ciRdu3bFaDSSkpJCdHS0uBxfU1ODTqcjPDz8rg6KCbVaTVxcHMnJyWIvM3d3d3x8fNBqtWi1Wuzs7Jg2bdod30XJyclERUWh1+uxs7PDy8uL5ORkUlNT
/1zOiVqt5vvvv+f9998XE4NWrFhxWy/XYDAQHR3N7Nmzqa6uZsqUKSxfvlxsWNdSGhsbUalUpKWlsW3bNg4ePIhOp8PT0xNBEMRysPs5zTY2NsyaNYtp06YxadIkVCoVX375JTNnzrwnByUmJobJkyczaNAgdu7c2Wwi7L2gUqk4cOAAn3/+OadPn8bDw4Mff/yR0NBQ8UG1bNkyUlJS6NixI127diU5OZnr168zatQovvzyyxZX8sCNJbOff/6ZDRs2kJKSglarpWvXrnz44YdERES0aFaSm5vLa6+9xrFjx265Fo/CA15C4l54VJyToqIiIiMjaWhoICAggLS0NEaMGEFAQECzv2F6PshkMuRyOXl5eYSEhBAcHNzikma4IURWUVFBcXExOTk5ODk5IZPJKC8vp6GhAVdXV1xcXEhNTcXZ2ZmgoCAyMjLQ6/VNBMzq6uowNzfH2dmZqqoqUcFUqVTi6uqKSqWipKQEHx8fJkyYIAo83g1BEPjll1+Qy+WUlJTw5JNPNqmOvFeMRiP5+fmcPHmShoYGwsLCRI2UUaNGUVRURFxcHLm5uTg6OqJSqdDpdJiZmYkRkP79+zNo0KAWlV/X19eTnp7OmTNnqKqqwtHRUVzicXFxYfz48S3KR0xMTOTQoUNotVqUSiX29vYcOnSInTt3/jmcE1PvicjISORyOWvXrmXWrFktemklJSUxf/58Tpw4QdeuXfn444+ZNGlSi73klJQUVqxYwcmTJyktLRWbNbm4uNCrVy9cXV0JCgpCoVBw5swZ4uPjxQSilmBubs6rr75Kv379mDdvHvb29mKN/ZIlS1r0YDIYDCxbtozVq1ezZMkSli9f3qJ9/5by8nKOHTvGP//5T1EKe/DgwWzcuBE7OzuioqLYtm0b8fHxGI1GAgMD2bJlC/7+/qSnp7N06VIiIyPx8PDggw8+YPr06be90U0zhHXr1rF//36uXLmCwWDA19eXJUuWMH78+GbVDO9EZWUl48aN49y5c00+f9gPeAmJe+VhOyf29vYkJiZy5MgRPDw8qKiowNnZmXHjxt01MmowGDh+/DhXr17Fz8+PgwcPihWULc0BjI2NRaPRkJOTg6OjI+3ataOuro6rV6+i0WgwGAw4ODhgbm5OVVUVWq0WS0tLsdOxi4sLTk5OFBcXo9VqKS8vx9HRkc6dO1NcXEy7du3EdiHXr19HLpdjZWWFjY0NEyZMoHv37i06Xz/99BPdunUjOTmZ11577Z57I2k0Gi5fvsylS5eora0lICAACwsLLl26hJeXFzY2Nly+fFls3mh6rwwbNgxfX1/S0tI4f/48gwYNory8nLq6OoYMGSJ2LL4ZQRCoqanh4sWLJCYmUltbi6WlJR07dhTVwPv160fv3r3vaZXBVBWk1+sxMzMjMzOT7du3P/7OSU5ODi+88AIZGRl06dKFTz/9lMmTJ99TZOHq1at88MEHbN26FQsLC0aPHs2KFSvw9/dv0e9otVpKSkpEMTBHR0c8PDxQKBS3rKlWVVURGRnJgQMHOHToECqVisbGRmQymaji+lvMzc1ZuXIlP/zwA0lJSdjY2IidHr/99tsWebB1dXWEh4eTkpLCoUOHxI6Vd8NoNFJcXMzGjRv58ccfKS8vx97eHn9/fyIiIujatStRUVFs3bqVkpISUcHw2WefZc2aNU1mC7W1tWzcuJG1a9dSXV1Nv379mDp1KlOmTMHW1hZ7e3saGxs5ceIE27dv58CBA2LUycrKihdffJGPPvrogZKYs7KyiIiIaNJ9VHJOJNoaD9M5qa6uJjk5mcOHDyMIAnK5nG7dujFx4sR7qo5JS0sjOjqa0tJSLC0tsba2JiQkhAEDBtzSYfm3mJ5LhYWFVFZWAjdk1t3c3Gjfvj02NjZi3ktjYyO5ubmcOXOGiooK1Gr1HZdYrK2t8fHxoaKigry8PGxtbXFycsJoNFJTU0P79u0ZO3YsPj4+dx1jXl4e0dHRODk54ezsfNciBhO1tbWkpaURFxeHwWBAp9Ph5uZGdXU1jY2NODk5ibIPDg4OyOVy5HI5169fJywsjCFDhohjzM/PJyYmBnd3d9zc3EhMTEQQBAIDA7GxscHd3Z2amhqSkpLIzc1Fp9PRqVMnDAYDxcXF2NraMmLECIKCgu47R+jcuXMcPHgQg8Hw58g5OXHiBLNmzSI3N5ewsDA+//zz26q73o3GxkbWrl3L2rVrqampwdHRkeeee4758+fTq1cv5HJ5qyaTarVaKisrKSws5OrVqzg4OIhtp03U19eLXYKVSiXOzs4kJSWhVCr58MMP2bRpE506dWL9+vUtulHOnTvH+PHjGT9+PJs3b76roV25coVPP/2U/fv34+HhQVhYGDNmzECn03HhwgW2bt1KUlISKpUKuOFEeXt7M336dBYtWtRsVMQks//+++9z8uRJdDod7dq1w9LSkt69e3Pt2jWysrLERGILCwv69u3LqlWrWqVDqek8TJkyhfz8fEByTiTaHg/TOTlw4ABnz54VxQwHDx7MsGHD7ktSvqKigh9//FHMIzPlkgUFBdG9e3exG3BrIAgCFRUV5OTkUF5eTmVlJTY2NpibmzdZ6tHr9ZSXl9OnTx9UKhXZ2dm0a9eOiooKGhoa8PHxQRAEevXqxeDBg+867kOHDlFWVkZlZSUzZsxo0i34t+j1ehITE4mNjUUul1NaWorRaCQ0NJTCwkKqq6sxNzenb9++6PV6MbpTUVGBq6srY8aMuUW1G26kPcTGxpKXl4e3tzfW1tbk5OSQn5+PTCZDp9NhZWWFTqdr4tT16tWLESNG3PGYW4rJbtLT09mxY8fj7Zx07dqVgoIC3n33XRYvXnxXb/tuCILAgQMH+OSTT4iPjxdVVb28vBg0aBBPPfUUgYGBODg43LYkuLUxGo0cO3aMlStXcuLECfGlPWDAAFavXs27776L0Wjk22+/bdK19HbjM6nbfvXVV6LybHNERkayYMECnJ2defHFF8UeChMnTmTChAkUFxcjCAIuLi74+fmJ1UazZ88Ww6l3or6+nsTERPbs2UN6ejqhoaH8/PPPJCUlibX1Y8aMYfDgwUyYMKFJAnFr8PHHH/P3v/8dkJwTibbHw3ROVq1ahUajEZdxTL277he1Ws2BAwdIS0vDzs4OJycnCgsL0ev12NjY4OnpiZeXFw4ODnTo0EHU0bjXfZoiH+bm5nd8ngiCQHV1NbGxseTm5iIIAt27d0epVHLs2DE8PT3FaIa/vz+DBw++47GY8iFtbGyQyWQ8++yzzU6y1Go1v/76KxcvXkSlUqHVaunevTv29vZYWlpSUFBAWVmZ6BiVl5ejVqtxcXFhyJAheHl53dWRMyl/m86vTCZDJpPh7OyMpaUlaWlpWFlZ4eLiQlhYGL6+vq32ntNqtWzYsIFff/2V/fv3P97OiUwm429/+xsffPBBq/ZYqK6u5sMPP+Sbb75pIqluZmaGs7MzdnZ2+Pv7Y2dnx7Bhwxg0aBAeHh44OTnR0NBAZWWluEaXn59PQUFBkyRMlUrFyZMnsbS0ZMaMGYSGht7
[... base64-encoded PNG image data of the notebook figure output omitted ...]\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import draw_tools\n", + "image = numpy.array(PIL.Image.open(io.BytesIO(recorder.image.value)))[..., :3]\n", + "# 生成风格图像\n", + "im = draw_tools.generate_style_image(image)\n", + "# 获取轮廓列表\n", + "contour_list = draw_tools.getContourList(im, pen_width = 3, min_contour_len = 30, is_show=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# 对轮廓列表进行排序\n", + "contour_list = draw_tools.sortContoursList(contour_list)\n", + "# 平滑拟合并采样轮廓\n", + "f_contour_list = draw_tools.sample_and_smooth_contours(im, contour_list, is_show=False)\n", + "# 保存轮廓点到文件中,每个轮廓占一行,x和y坐标用逗号分割,点之间用逗号分割\n", + "draw_tools.save_contour_points(f_contour_list, \"../data/contour_data.txt\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import shutil\n", + "shutil.copy( \"../data/contour_data.txt\", \"/home/robot/Work/system/bspline.txt\")\n", + "import os\n", + "currdir = os.getcwd()\n", + "os.chdir('/home/ck/')" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "#1、执行./IGH_rc.sh,启动igh\n", + "#2、执行./runIGH.sh,开启通讯\n", + "#3、执行./runrobot.sh,运行画画程序\n", + "#4、执行./stoprobot.sh,关闭画画程序\n", + "#5、执行./runrobotoig.sh,运行运动程序,可在桌面程序上运动\n", + "#6、执行./stoprobotoig.sh,关闭运动程序\n", + "#7、执行./stopIGH.sh,关闭通讯" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting EtherCAT master 1.5.2 done\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#1、执行./IGH_rc.sh,启动igh\n", + "os.system(\"./IGH_rc.sh\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Failed to reserve master: Device or resource busy\n", + "root 38530 1 1 Dec06 ? 00:04:26 ./IgHEtherCATMaster --task run --file ./eni.xml --affinity 1\n", + "root 111117 110176 0 02:13 ? 00:00:00 sh -c ps -ef | grep Master\n", + "root 111119 111117 0 02:13 ? 00:00:00 grep Master\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import time\n", + "#2、执行./runIGH.sh,开启通讯\n", + "os.system(\"./runIGH.sh\")\n", + "time.sleep(5)\n", + "os.system(\"ps -ef | grep Master\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "============start robot control======================\n", + "start run time:20231207021353\n", + "verison: 11.4.6\n", + "Author: HanBing\n", + "Email: A994055925@163.com\n", + "Data file path: /home/ck/robot_config/data/\n", + "Start system initialize!\n", + "OUT SINGLE ABSOLUTE ENCOUDER(POSITION CLOSED LOOP)\n", + "group init existing problem. 
Some fields are not found!\n", + "dof num is 6\n", + "control:6,state:6,mode:0\n", + "ac_position:6,ac_velocity:0,ac_torque:0,ac_position2:0,ac_velocity2:0,ac_sensor_torque:0,ac_mode:0,ErrorCode:0,FollowingErrorActualValue:0\n", + "position:6,velocity:0,torque:0,VelocityOffset:0,TorqueOffset:0,TorqueMaxLimit:0,TorqueMinLimit:0\n", + "/////////////////////////////////////////////////////////////////\n", + "fd_ecat_in_name: /ecat_in\n", + "fd_ecat_out_name: /ecat_out\n", + "/////////////////////////////////////////////////////////////////\n", + "state: Status Word\n", + "control: Control word\n", + "mode: \n", + "ac_position: Position actual value\n", + "ac_velocity: \n", + "ac_torque: \n", + "ac_position2: \n", + "ac_velocity2: \n", + "ac_sensor_torque: \n", + "ac_mode: \n", + "ErrorCode: \n", + "FollowingErrorActualValue: \n", + "position: Target Position\n", + "velocity: \n", + "torque: \n", + "VelocityOffset: \n", + "TorqueOffset: \n", + "TorqueMaxLimit: \n", + "TorqueMinLimit: \n", + "ec_di: \n", + "ec_do: \n", + "ec_ai: \n", + "ec_ao: \n", + "ec_li: \n", + "ec_lo: \n", + "Number of fields: 26\n", + "busyTs: 4000000 ns\n", + "/////////////////////////////////////////////////////////////////\n", + "ec_device1/robot0: \n", + "DOF: 6\n", + "0_state: 64\n", + "0_control: 64\n", + "0_mode: -999999\n", + "0_ac_position: 0\n", + "0_ac_velocity: -999999\n", + "0_ac_torque: -999999\n", + "0_ac_current: -999999\n", + "0_ac_position2: -999999\n", + "0_ac_velocity2: -999999\n", + "0_ac_sensor_torque: -999999\n", + "0_ac_mode: -999999\n", + "0_ErrorCode: -999999\n", + "0_FollowingErrorActualValue: -999999\n", + "0_position: 0\n", + "0_velocity: -999999\n", + "0_torque: -999999\n", + "0_VelocityOffset: -999999\n", + "0_TorqueOffset: -999999\n", + "0_TorqueMaxLimit: -999999\n", + "0_TorqueMinLimit: -999999\n", + "1_state: 144\n", + "1_control: 144\n", + "1_mode: -999999\n", + "1_ac_position: 80\n", + "1_ac_velocity: -999999\n", + "1_ac_torque: -999999\n", + "1_ac_current: -999999\n", + "1_ac_position2: -999999\n", + "1_ac_velocity2: -999999\n", + "1_ac_sensor_torque: -999999\n", + "1_ac_mode: -999999\n", + "1_ErrorCode: -999999\n", + "1_FollowingErrorActualValue: -999999\n", + "1_position: 80\n", + "1_velocity: -999999\n", + "1_torque: -999999\n", + "1_VelocityOffset: -999999\n", + "1_TorqueOffset: -999999\n", + "1_TorqueMaxLimit: -999999\n", + "1_TorqueMinLimit: -999999\n", + "2_state: 224\n", + "2_control: 224\n", + "2_mode: -999999\n", + "2_ac_position: 160\n", + "2_ac_velocity: -999999\n", + "2_ac_torque: -999999\n", + "2_ac_current: -999999\n", + "2_ac_position2: -999999\n", + "2_ac_velocity2: -999999\n", + "2_ac_sensor_torque: -999999\n", + "2_ac_mode: -999999\n", + "2_ErrorCode: -999999\n", + "2_FollowingErrorActualValue: -999999\n", + "2_position: 160\n", + "2_velocity: -999999\n", + "2_torque: -999999\n", + "2_VelocityOffset: -999999\n", + "2_TorqueOffset: -999999\n", + "2_TorqueMaxLimit: -999999\n", + "2_TorqueMinLimit: -999999\n", + "3_state: 304\n", + "3_control: 304\n", + "3_mode: -999999\n", + "3_ac_position: 240\n", + "3_ac_velocity: -999999\n", + "3_ac_torque: -999999\n", + "3_ac_current: -999999\n", + "3_ac_position2: -999999\n", + "3_ac_velocity2: -999999\n", + "3_ac_sensor_torque: -999999\n", + "3_ac_mode: -999999\n", + "3_ErrorCode: -999999\n", + "3_FollowingErrorActualValue: -999999\n", + "3_position: 240\n", + "3_velocity: -999999\n", + "3_torque: -999999\n", + "3_VelocityOffset: -999999\n", + "3_TorqueOffset: -999999\n", + "3_TorqueMaxLimit: -999999\n", + "3_TorqueMinLimit: 
-999999\n", + "4_state: 384\n", + "4_control: 384\n", + "4_mode: -999999\n", + "4_ac_position: 320\n", + "4_ac_velocity: -999999\n", + "4_ac_torque: -999999\n", + "4_ac_current: -999999\n", + "4_ac_position2: -999999\n", + "4_ac_velocity2: -999999\n", + "4_ac_sensor_torque: -999999\n", + "4_ac_mode: -999999\n", + "4_ErrorCode: -999999\n", + "4_FollowingErrorActualValue: -999999\n", + "4_position: 320\n", + "4_velocity: -999999\n", + "4_torque: -999999\n", + "4_VelocityOffset: -999999\n", + "4_TorqueOffset: -999999\n", + "4_TorqueMaxLimit: -999999\n", + "4_TorqueMinLimit: -999999\n", + "5_state: 464\n", + "5_control: 464\n", + "5_mode: -999999\n", + "5_ac_position: 400\n", + "5_ac_velocity: -999999\n", + "5_ac_torque: -999999\n", + "5_ac_current: -999999\n", + "5_ac_position2: -999999\n", + "5_ac_velocity2: -999999\n", + "5_ac_sensor_torque: -999999\n", + "5_ac_mode: -999999\n", + "5_ErrorCode: -999999\n", + "5_FollowingErrorActualValue: -999999\n", + "5_position: 400\n", + "5_velocity: -999999\n", + "5_torque: -999999\n", + "5_VelocityOffset: -999999\n", + "5_TorqueOffset: -999999\n", + "5_TorqueMaxLimit: -999999\n", + "5_TorqueMinLimit: -999999\n", + "/////////////////////////////////////////////////////////////////\n", + "/////////////////////////////////////////////////////////////////\n", + "/////////////////////////////////////////////////////////////////" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/dev/mem: Bad address\n", + "/dev/mem: Bad address\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Device is running\n", + "InitRobot\n", + "Hardware Match\n", + "printf_hardware_information: can't get cpuid\n", + "Hardware information: \n", + " cpuid:0000000000000000\n", + " mac:128a5e84678d\n", + "init_robot_teach\n", + "set_robot_index\n", + "get_robot_num\n", + "set_addition_index\n", + "get_addition_num\n", + "get_robot_num\n", + "initialize finish\n", + "start Draw\n", + "power is finish!\n", + "150\n", + "1\n", + "2\n", + "3\n", + "4\n", + "5\n", + "6\n", + "7\n", + "8\n", + "9\n", + "10\n", + "11\n", + "12\n", + "13\n", + "14\n", + "15\n", + "16\n", + "17\n", + "18\n", + "19\n", + "20\n", + "21\n", + "22\n", + "23\n", + "24\n", + "25\n", + "26\n", + "27\n", + "28\n", + "29\n", + "30\n", + "31\n", + "32\n", + "33\n", + "34\n", + "35\n", + "36\n", + "37\n", + "38\n", + "39\n", + "40\n", + "41\n", + "42\n", + "43\n", + "44\n", + "45\n", + "46\n", + "47\n", + "48\n", + "49\n", + "50\n", + "51\n", + "52\n", + "53\n", + "54\n", + "55\n", + "56\n", + "57\n", + "58\n", + "59\n", + "60\n", + "61\n", + "62\n", + "63\n", + "64\n", + "65\n", + "66\n", + "67\n", + "68\n", + "69\n", + "70\n", + "71\n", + "72\n", + "73\n", + "74\n", + "75\n", + "76\n", + "77\n", + "78\n", + "79\n", + "80\n", + "81\n", + "82\n", + "83\n", + "84\n", + "85\n", + "86\n", + "87\n", + "88\n", + "89\n", + "90\n", + "91\n", + "92\n", + "93\n", + "94\n", + "95\n", + "96\n", + "97\n", + "98\n", + "99\n", + "100\n", + "101\n", + "102\n", + "103\n", + "104\n", + "105\n", + "106\n", + "107\n", + "108\n", + "109\n", + "110\n", + "111\n", + "112\n", + "113\n", + "114\n", + "115\n", + "116\n", + "117\n", + "118\n", + "119\n", + "120\n", + "121\n", + "122\n", + "123\n", + "124\n", + "125\n", + "126\n", + "127\n", + "128\n", + "129\n", + "130\n", + "131\n", + "132\n", + "133\n", + "134\n", + "135\n", + "136\n", + "137\n", + "138\n", + "139\n", + "140\n", + "141\n", + "142\n", + "143\n", + "144\n", + "145\n", + "146\n", + "147\n", + "148\n", + "149\n", + "150\n" + 
] + } + ], + "source": [ + "# Run the drawing program\n", + "# 3. Run ./runrobot.sh to start the drawing program\n", + "# 4. Run ./stoprobot.sh to stop the drawing program\n", + "# 5. Run ./runrobotoig.sh to start the motion program (the robot can then be moved from the desktop application)\n", + "# 6. Run ./stoprobotoig.sh to stop the motion program\n", + "os.system(\"./runrobot.sh\")\n", + "# os.system(\"./stoprobot.sh\")\n", + "# os.system(\"./runrobotoig.sh\")\n", + "# os.system(\"./stoprobotoig.sh\")" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "# 4. Run ./stoprobot.sh to stop the drawing program\n", + "os.system(\"./stoprobot.sh\")\n", + "os.system(\"./stoprobot.sh\")\n", + "os.system(\"./stopIGH.sh\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/robot_painting/qmupd_vs/options/__init__.py b/robot_painting/qmupd_vs/options/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e7eedebe54aa70169fd25951b3034d819e396c90 --- /dev/null +++ b/robot_painting/qmupd_vs/options/__init__.py @@ -0,0 +1 @@ +"""This package includes option modules: training options, test options, and basic options (used in both training and test).""" diff --git a/robot_painting/qmupd_vs/options/base_options.py b/robot_painting/qmupd_vs/options/base_options.py new file mode 100644 index 0000000000000000000000000000000000000000..c61843e83642637b9cae2e835cc209fa2aa94598 --- /dev/null +++ b/robot_painting/qmupd_vs/options/base_options.py @@ -0,0 +1,145 @@ +import argparse +import os +from util import util +import torch +import models +import data + + +class BaseOptions(): + """This class defines options used during both training and test time. + + It also implements several helper functions such as parsing, printing, and saving the options. + It also gathers additional options defined in functions in both dataset class and model class. + """ + + def __init__(self): + """Reset the class; indicates the class hasn't been initialized""" + self.initialized = False + + def initialize(self, parser): + """Define the common options that are used in both training and test.""" + # basic parameters + parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)') + parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') + parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') + parser.add_argument('--gpu_ids_p', type=str, default='0', help='gpu ids for pretrained auxiliary models: e.g. 0 0,1,2, 0,2. use -1 for CPU') + parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') + # model parameters + parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use.
[cycle_gan | pix2pix | test | colorization]') + parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale') + parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale') + parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer') + parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer') + parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator') + parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]') + parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') + parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]') + parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]') + parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') + parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') + # dataset parameters + parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]') + parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') + parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') + parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') + parser.add_argument('--batch_size', type=int, default=1, help='input batch size') + parser.add_argument('--load_size', type=int, default=286, help='scale images to this size') + parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size') + parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') + parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]') + parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') + parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') + # additional parameters + parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') + parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? 
if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]') + parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') + parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') + self.initialized = True + return parser + + def gather_options(self): + """Initialize our parser with basic options(only once). + Add additional model-specific and dataset-specific options. + These options are defined in the function + in model and dataset classes. + """ + if not self.initialized: # check if it has been initialized + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = self.initialize(parser) + + # get the basic options + opt, _ = parser.parse_known_args() + + # modify model-related parser options + model_name = opt.model + model_option_setter = models.get_option_setter(model_name) + parser = model_option_setter(parser, self.isTrain) + opt, _ = parser.parse_known_args() # parse again with new defaults + + # modify dataset-related parser options + dataset_name = opt.dataset_mode + dataset_option_setter = data.get_option_setter(dataset_name) + parser = dataset_option_setter(parser, self.isTrain) + + # save and return the parser + self.parser = parser + return parser.parse_args() + + def print_options(self, opt): + """Print and save options + + It will print both current options and default values(if different). + It will save options into a text file / [checkpoints_dir] / opt.txt + """ + message = '' + message += '----------------- Options ---------------\n' + for k, v in sorted(vars(opt).items()): + comment = '' + default = self.parser.get_default(k) + if v != default: + comment = '\t[default: %s]' % str(default) + message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) + message += '----------------- End -------------------' + print(message) + + # save to the disk + expr_dir = os.path.join(opt.checkpoints_dir, opt.name) + util.mkdirs(expr_dir) + file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase)) + with open(file_name, 'wt') as opt_file: + opt_file.write(message) + opt_file.write('\n') + + def parse(self): + """Parse our options, create checkpoints directory suffix, and set up gpu device.""" + opt = self.gather_options() + opt.isTrain = self.isTrain # train or test + + # process opt.suffix + if opt.suffix: + suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' + opt.name = opt.name + suffix + + self.print_options(opt) + + # set gpu ids + str_ids = opt.gpu_ids.split(',') + opt.gpu_ids = [] + for str_id in str_ids: + id = int(str_id) + if id >= 0: + opt.gpu_ids.append(id) + # if len(opt.gpu_ids) > 0: + # torch.cuda.set_device(opt.gpu_ids[0]) + + # set gpu ids + str_ids = opt.gpu_ids_p.split(',') + opt.gpu_ids_p = [] + for str_id in str_ids: + id = int(str_id) + if id >= 0: + opt.gpu_ids_p.append(id) + + self.opt = opt + return self.opt diff --git a/robot_painting/qmupd_vs/options/test_options.py b/robot_painting/qmupd_vs/options/test_options.py new file mode 100644 index 0000000000000000000000000000000000000000..4a686174ca94a7e30aaf2ea87f7dc70339f306a1 --- /dev/null +++ b/robot_painting/qmupd_vs/options/test_options.py @@ -0,0 +1,25 @@ +from .base_options import BaseOptions + + +class TestOptions(BaseOptions): + """This class includes test options. + + It also includes shared options defined in BaseOptions. 
+ """ + + def initialize(self, parser): + parser = BaseOptions.initialize(self, parser) # define shared options + parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.') + parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') + parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') + parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') + # Dropout and Batchnorm has different behavioir during training and test. + parser.add_argument('--eval', action='store_true', help='use eval mode during test time.') + parser.add_argument('--num_test', type=int, default=50, help='how many test images to run') + parser.add_argument('--imagefolder', type=str, default='images', help='subfolder to save images') + # rewrite devalue values + parser.set_defaults(model='test') + # To avoid cropping, the load_size should be the same as crop_size + parser.set_defaults(load_size=parser.get_default('crop_size')) + self.isTrain = False + return parser diff --git a/robot_painting/qmupd_vs/options/train_options.py b/robot_painting/qmupd_vs/options/train_options.py new file mode 100644 index 0000000000000000000000000000000000000000..cfcafd378b2d1562e33adc6b6cb4586b6d9f0406 --- /dev/null +++ b/robot_painting/qmupd_vs/options/train_options.py @@ -0,0 +1,41 @@ +from .base_options import BaseOptions + + +class TrainOptions(BaseOptions): + """This class includes training options. + + It also includes shared options defined in BaseOptions. + """ + + def initialize(self, parser): + parser = BaseOptions.initialize(self, parser) + # visdom and HTML visualization parameters + parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen') + parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.') + parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') + parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display') + parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') + parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') + parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html') + parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') + parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') + # network saving and loading parameters + parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') + parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs') + parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration') + parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') + parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...') + parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') + # 
training parameters + parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') + parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') + parser.add_argument('--niter_end', type=int, default=200, help='# of iter to end') + parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') + parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') + parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.') + parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') + parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]') + parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') + + self.isTrain = True + return parser diff --git a/robot_painting/qmupd_vs/outputs/sampling/rough_sketches__pretrain_rough_sketches/seq_data/1714068875325_fake_0.npz b/robot_painting/qmupd_vs/outputs/sampling/rough_sketches__pretrain_rough_sketches/seq_data/1714068875325_fake_0.npz new file mode 100644 index 0000000000000000000000000000000000000000..551e28015f9641ea2c5d85366e9720668a406bc0 Binary files /dev/null and b/robot_painting/qmupd_vs/outputs/sampling/rough_sketches__pretrain_rough_sketches/seq_data/1714068875325_fake_0.npz differ diff --git a/robot_painting/qmupd_vs/outputs/sampling/rough_sketches__pretrain_rough_sketches/seq_data/1714068905063_fake_0.npz b/robot_painting/qmupd_vs/outputs/sampling/rough_sketches__pretrain_rough_sketches/seq_data/1714068905063_fake_0.npz new file mode 100644 index 0000000000000000000000000000000000000000..64f0fcd30dea5c7048058f41de6030c8351ce178 Binary files /dev/null and b/robot_painting/qmupd_vs/outputs/sampling/rough_sketches__pretrain_rough_sketches/seq_data/1714068905063_fake_0.npz differ diff --git a/robot_painting/qmupd_vs/outputs/sampling/rough_sketches__pretrain_rough_sketches/seq_data/1_real_fake_0.npz b/robot_painting/qmupd_vs/outputs/sampling/rough_sketches__pretrain_rough_sketches/seq_data/1_real_fake_0.npz new file mode 100644 index 0000000000000000000000000000000000000000..037d08703197bbe405c65a7c62adad69500b2618 Binary files /dev/null and b/robot_painting/qmupd_vs/outputs/sampling/rough_sketches__pretrain_rough_sketches/seq_data/1_real_fake_0.npz differ diff --git a/robot_painting/qmupd_vs/qmupd_single_image.py b/robot_painting/qmupd_vs/qmupd_single_image.py new file mode 100644 index 0000000000000000000000000000000000000000..90dd3b85814767824ea52d15285a68555dac82c1 --- /dev/null +++ b/robot_painting/qmupd_vs/qmupd_single_image.py @@ -0,0 +1,92 @@ +"""General-purpose test script for image-to-image translation. + +Once you have trained your model with train.py, you can use this script to test the model. +It will load a saved model from --checkpoints_dir and save the results to --results_dir. + +It first creates model and dataset given the option. It will hard-code some parameters. +It then runs inference for --num_test images and save results to an HTML file. 
+ +Example (You need to train models first or download pre-trained models from our website): + Test a CycleGAN model (both sides): + python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan + + Test a CycleGAN model (one side only): + python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout + + The option '--model test' is used for generating CycleGAN results only for one side. + This option will automatically set '--dataset_mode single', which only loads the images from one set. + On the contrary, using '--model cycle_gan' requires loading and generating results in both directions, + which is sometimes unnecessary. The results will be saved at ./results/. + Use '--results_dir <directory_path_to_save_result>' to specify the results directory. + + Test a pix2pix model: + python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA + +See options/base_options.py and options/test_options.py for more test options. +See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md +See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md +""" +import os +from options.test_options import TestOptions +from data import create_dataset +from models import create_model +from util.visualizer import save_images +from util import util +from PIL import Image +import numpy as np +import ntpath + + +if __name__ == '__main__': + opt = TestOptions().parse() # get test options + # hard-code some parameters for test + opt.num_threads = 0 # test code only supports num_threads = 0 + opt.batch_size = 1 # test code only supports batch_size = 1 + opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed. + opt.no_flip = True # no flip; comment this line if results on flipped images are needed. + opt.display_id = -1 # no visdom display; the test code saves the results to an HTML file. + dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options + model = create_model(opt) # create a model given opt.model and other options + model.setup(opt) # regular setup: load and print networks; create schedulers + # create a website + # web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch)) # define the website directory + # webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch)) + # webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch), refresh=0, folder=opt.imagefolder) + # test with eval mode. This only affects layers like batchnorm and dropout. + # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode. + # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout. + print("================,", model.model_names) + if opt.eval: + model.eval() + for name in model.model_names: + if isinstance(name, str): + print(getattr(model, 'net' + name).training) + for i, data in enumerate(dataset): + if i >= opt.num_test: # only apply our model to opt.num_test images. + break + model.set_input(data) # unpack data from data loader + model.test() # run inference + visuals = model.get_current_visuals() # get image results + img_path = model.get_image_paths() # get image paths + print('processing (%04d)-th image...
%s' % (i, img_path)) + short_path = ntpath.basename(img_path[0]) + name = os.path.splitext(short_path)[0] + for label, im_data in visuals.items(): + # convert the output tensor to an image array + im = util.tensor2im(im_data) + image_name = '%s_%s.png' % (name, label) + # create the output directory if it does not exist + outimage_dir = "./robot_data/output" + if not os.path.exists(outimage_dir): + os.makedirs(outimage_dir) + save_path = os.path.join(outimage_dir, image_name) + print('save (%04d)-th image... %s' % (i, save_path)) + h, w, _ = im.shape + if opt.aspect_ratio > 1.0: + #im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic') + im = np.array(Image.fromarray(im).resize((int(w * opt.aspect_ratio), h), Image.BICUBIC)) + if opt.aspect_ratio < 1.0: + #im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic') + im = np.array(Image.fromarray(im).resize((w, int(h / opt.aspect_ratio)), Image.BICUBIC)) + util.save_image(im, save_path) + \ No newline at end of file diff --git a/robot_painting/qmupd_vs/rasterization_utils/NeuralRenderer.py b/robot_painting/qmupd_vs/rasterization_utils/NeuralRenderer.py new file mode 100644 index 0000000000000000000000000000000000000000..ca9aedc253d4c53621f49f958ac005954f18792c --- /dev/null +++ b/robot_painting/qmupd_vs/rasterization_utils/NeuralRenderer.py @@ -0,0 +1,171 @@ +import tensorflow as tf + + +class RasterUnit(object): + def __init__(self, + raster_size, + input_params, # (N, 10) + reuse=False): + self.raster_size = raster_size + self.input_params = input_params + + with tf.variable_scope("raster_unit", reuse=reuse): + self.build_unit() + + def build_unit(self): + x = self.input_params # (N, 10) + x = self.fully_connected(x, 10, 512, scope='fc1') # (N, 512) + x = tf.nn.relu(x) + x = self.fully_connected(x, 512, 1024, scope='fc2') # (N, 1024) + x = tf.nn.relu(x) + x = self.fully_connected(x, 1024, 2048, scope='fc3') # (N, 2048) + x = tf.nn.relu(x) + x = self.fully_connected(x, 2048, 4096, scope='fc4') # (N, 4096) + x = tf.nn.relu(x) + x = tf.reshape(x, (-1, 16, 16, 16)) # (N, 16, 16, 16) + x = tf.transpose(x, (0, 2, 3, 1)) + + x = self.conv2d(x, 32, 3, 1, scope='conv1') # (N, 16, 16, 32) + x = tf.nn.relu(x) + x = self.conv2d(x, 32, 3, 1, scope='conv2') # (N, 16, 16, 32) + x = self.pixel_shuffle(x, upscale_factor=2) # (N, 32, 32, 8) + + x = self.conv2d(x, 16, 3, 1, scope='conv3') # (N, 32, 32, 16) + x = tf.nn.relu(x) + x = self.conv2d(x, 16, 3, 1, scope='conv4') # (N, 32, 32, 16) + x = self.pixel_shuffle(x, upscale_factor=2) # (N, 64, 64, 4) + + x = self.conv2d(x, 8, 3, 1, scope='conv5') # (N, 64, 64, 8) + x = tf.nn.relu(x) + x = self.conv2d(x, 4, 3, 1, scope='conv6') # (N, 64, 64, 4) + x = self.pixel_shuffle(x, upscale_factor=2) # (N, 128, 128, 1) + x = tf.sigmoid(x) + + # (N, 128, 128), [0.0-stroke, 1.0-BG] + self.stroke_image = 1.0 - tf.reshape(x, (-1, self.raster_size, self.raster_size)) + + def conv2d(self, input_tensor, out_channels, kernel_size, stride, scope, reuse=False): + with tf.variable_scope(scope, reuse=reuse): + output_tensor = tf.layers.conv2d(input_tensor, out_channels, kernel_size=kernel_size, + strides=(stride, stride), + padding="same", kernel_initializer=tf.keras.initializers.he_normal()) + return output_tensor + + def fully_connected(self, input_tensor, in_dim, out_dim, scope, reuse=False): + with tf.variable_scope(scope, reuse=reuse): + weight = tf.get_variable("weight", [in_dim, out_dim], dtype=tf.float32, + initializer=tf.random_normal_initializer()) + bias = tf.get_variable("bias", [out_dim], dtype=tf.float32, + initializer=tf.random_normal_initializer()) + output_tensor =
tf.matmul(input_tensor, weight) + bias + return output_tensor + + def pixel_shuffle(self, input_tensor, upscale_factor): + params_shape = input_tensor.get_shape() + n, h, w, c = params_shape + input_tensor_proc = tf.reshape(input_tensor, (n, h, w, c // 4, 4)) + input_tensor_proc = tf.transpose(input_tensor_proc, (0, 1, 2, 4, 3)) + input_tensor_proc = tf.reshape(input_tensor_proc, (n, h, w, -1)) + output_tensor = tf.depth_to_space(input_tensor_proc, block_size=upscale_factor) + return output_tensor + + +class NeuralRasterizor(object): + def __init__(self, + raster_size, + seq_len, + position_format='abs', + raster_padding=10, + strokes_format=3): + self.raster_size = raster_size + self.seq_len = seq_len + self.position_format = position_format + self.raster_padding = raster_padding + self.strokes_format = strokes_format + + assert position_format in ['abs', 'rel'] + + def raster_func_abs(self, input_data, raster_seq_len=None): + """ + x and y in absolute position. + :param input_data: (N, seq_len, 10): [x0, y0, x1, y1, x2, y2, r0, r2, w0, w2]. All in [0.0, 1.0] + :return: + """ + seq_len = raster_seq_len if raster_seq_len is not None else self.seq_len + + raster_params = tf.transpose(input_data, [1, 0, 2]) # (seq_len, N, 10) + + seq_stroke_images = tf.map_fn(self.stroke_drawer_with_raster_unit, raster_params, + parallel_iterations=32) # (seq_len, N, raster_size, raster_size) + seq_stroke_images = tf.transpose(seq_stroke_images, [1, 2, 3, 0]) + # (N, raster_size, raster_size, seq_len), [0.0-stroke, 1.0-BG] + + filter_seq_stroke_images = 1.0 - seq_stroke_images + # (N, raster_size, raster_size, seq_len), [0.0-BG, 1.0-stroke] + + # stacking + stroke_images_unclip = tf.reduce_sum(filter_seq_stroke_images, axis=-1) # (N, raster_size, raster_size) + stroke_images = tf.clip_by_value(stroke_images_unclip, 0.0, 1.0) # [0.0-BG, 1.0-stroke] + return stroke_images, stroke_images_unclip, seq_stroke_images + + def stroke_drawer_with_raster_unit(self, params_batch): + """ + Convert two points into a raster stroke image with RasterUnit. + :param params_batch: (N, 10) + :return: (N, raster_size, raster_size) + """ + raster_unit = RasterUnit( + raster_size=self.raster_size, + input_params=params_batch, + reuse=tf.AUTO_REUSE + ) + stroke_image = raster_unit.stroke_image # (N, raster_size, raster_size), [0.0-stroke, 1.0-BG] + return stroke_image + + +class NeuralRasterizorStep(object): + def __init__(self, + raster_size, + position_format='abs'): + self.raster_size = raster_size + self.position_format = position_format + + assert position_format in ['abs', 'rel'] + + def raster_func_stroke_abs(self, input_data): + """ + x and y in absolute position. + :param input_data: (N, 8): [x0, y0, x1, y1, x2, y2, r0, r2]. 
All in [0.0, 1.0] + :return: + """ + w_in = tf.ones(shape=(input_data.shape[0], 2), dtype=tf.float32) + raster_params = tf.concat([input_data, w_in], axis=-1) # (N, 10) + stroke_image = self.stroke_drawer_with_raster_unit(raster_params) # (N, raster_size, raster_size), [0.0-stroke, 1.0-BG] + stroke_image = 1.0 - stroke_image # [0.0-BG, 1.0-stroke] + + return stroke_image + + def mask_ending_state(self, input_states): + """ + Mask the ending state to be 1 + :param input_states: (N, seq_len, 1) in offset manner + :param seq_len: + :return: + """ + ending_state_accu = tf.cumsum(input_states, axis=1) # (N, seq_len, 1) + ending_state_clip = tf.clip_by_value(ending_state_accu, 0.0, 1.0) # (N, seq_len, 1) + return ending_state_clip + + def stroke_drawer_with_raster_unit(self, params_batch): + """ + Convert two points into a raster stroke image with RasterUnit. + :param params_batch: (N, 10) + :return: (N, raster_size, raster_size) + """ + raster_unit = RasterUnit( + raster_size=self.raster_size, + input_params=params_batch, + reuse=tf.AUTO_REUSE + ) + stroke_image = raster_unit.stroke_image # (N, raster_size, raster_size), [0.0-stroke, 1.0-BG] + return stroke_image diff --git a/robot_painting/qmupd_vs/rasterization_utils/RealRenderer.py b/robot_painting/qmupd_vs/rasterization_utils/RealRenderer.py new file mode 100644 index 0000000000000000000000000000000000000000..6a6e58df2869ae6a2b768938426434884a1b1209 --- /dev/null +++ b/robot_painting/qmupd_vs/rasterization_utils/RealRenderer.py @@ -0,0 +1,177 @@ +import numpy as np +import gizeh + + +class GizehRasterizor(object): + def __init__(self): + self.name = 'GizehRasterizor' + + def get_line_array_v2(self, image_size, seq_strokes, stroke_width, is_bin=True): + """ + :param p1: (x, y) + :param p2: (x, y) + :return: line_arr: (image_size, image_size), {0, 1}, 0 for BG and 1 for strokes + """ + surface = gizeh.Surface(width=image_size, height=image_size) # in pixels + shape_list = [] + for seq_i in range(len(seq_strokes) - 1): + p1, p2 = seq_strokes[seq_i, :2], seq_strokes[seq_i + 1, :2] + pen_state = seq_strokes[seq_i, 2] + + if pen_state == 0.0: + line = gizeh.polyline(points=[p1, p2], stroke_width=stroke_width, stroke=(1, 1, 1), fill=(0, 0, 0)) + shape_list.append(line) + + group = gizeh.Group(shape_list) + group.draw(surface) + + # Now export the surface + line_arr = surface.get_npimage()[:, :, 0] # returns a (width x height x 3) numpy array + + if is_bin: + line_arr[line_arr <= 128] = 0 + line_arr[line_arr != 0] = 1 # (image_size, image_size) + else: + line_arr = np.array(line_arr, dtype=np.float32) / 255.0 + + return line_arr + + def get_line_array(self, p1, p2, image_size, stroke_width, is_bin=True): + """ + :param p1: (x, y) + :param p2: (x, y) + :return: line_arr: (image_size, image_size), {0, 1}, 0 for BG and 1 for strokes + """ + surface = gizeh.Surface(width=image_size, height=image_size) # in pixels + line = gizeh.polyline(points=[p1, p2], stroke_width=stroke_width, stroke=(1, 1, 1), fill=(0, 0, 0)) + line.draw(surface) + + # Now export the surface + line_arr = surface.get_npimage()[:, :, 0] # returns a (width x height x 3) numpy array + + if is_bin: + line_arr[line_arr <= 128] = 0 + line_arr[line_arr != 0] = 1 # (image_size, image_size) + else: + line_arr = np.array(line_arr, dtype=np.float32) / 255.0 + + return line_arr + + def load_sketch_images_on_the_fly_v2(self, image_size, norm_strokes3, stroke_width, is_bin=True): + """ + :param norm_strokes3: list (N_sketches,), each with (N_points, 3) + :return: list (N_sketches,), each with 
(raster_size, raster_size), 0-BG and 1-strokes + """ + assert type(norm_strokes3) is list + sketch_imgs_list = [] + for stroke_i in range(len(norm_strokes3)): + seq_strokes3 = norm_strokes3[stroke_i] # (N_points, 3) + sketch_img = self.get_line_array_v2(image_size, seq_strokes3, stroke_width=stroke_width, is_bin=is_bin) + sketch_img = np.clip(sketch_img, 0.0, 1.0) # (image_size, image_size), 0 for BG and 1 for strokes + sketch_imgs_list.append(sketch_img) + + return sketch_imgs_list + + def load_sketch_images_on_the_fly(self, image_size, norm_strokes3, stroke_width, is_bin=True): + """ + :param norm_strokes3: list (N_sketches,), each with (N_points, 3) + :return: list (N_sketches,), each with (raster_size, raster_size), 0-BG and 1-strokes + """ + assert type(norm_strokes3) is list + sketch_imgs_list = [] + for stroke_i in range(len(norm_strokes3)): + seq_strokes3 = norm_strokes3[stroke_i] # (N_points, 3) + seq_len = len(seq_strokes3) + stroke_imgs_list = [] + + for seq_i in range(seq_len - 1): + stroke_img = self.get_line_array(seq_strokes3[seq_i, :2], seq_strokes3[seq_i + 1, :2], image_size, + stroke_width=stroke_width, is_bin=is_bin) + pen_state = seq_strokes3[seq_i, 2] + stroke_img = stroke_img.astype(np.float32) * (1. - pen_state) + stroke_imgs_list.append(stroke_img) + + stroke_imgs_list = np.stack(stroke_imgs_list, + axis=-1) # (image_size, image_size, seq_len-1), 0 for BG and 1 for strokes + stroke_imgs_list = np.sum(stroke_imgs_list, axis=-1) + stroke_imgs_list = np.clip(stroke_imgs_list, 0.0, 1.0) # (image_size, image_size), 0 for BG and 1 for strokes + sketch_imgs_list.append(stroke_imgs_list) + + return sketch_imgs_list + + def normalize_coordinate_np(self, sx, sy, image_size, raster_padding=10.0): + """ + Convert offset to normalized absolute points. The numpy version as in NeuralRasterizor. 
+ :param sx: (N, seq_len) + :param sy: (N, seq_len) + :return: + """ + seq_len = sx.shape[1] + + # transfer to abs points + abs_x = np.cumsum(sx, axis=1) # (N, seq_len) + abs_y = np.cumsum(sy, axis=1) + + min_x = np.min(abs_x, axis=1, keepdims=True) # (N, 1) + max_x = np.max(abs_x, axis=1, keepdims=True) + min_y = np.min(abs_y, axis=1, keepdims=True) + max_y = np.max(abs_y, axis=1, keepdims=True) + + # transform to positive coordinate + abs_x = np.subtract(abs_x, np.tile(min_x, [1, seq_len])) # (N, seq_len) + abs_y = np.subtract(abs_y, np.tile(min_y, [1, seq_len])) + + # scaling to [0.0, raster_size - 2 * padding - 1] + bbox_w = np.squeeze(np.subtract(max_x, min_x), axis=-1) # (N) + bbox_h = np.squeeze(np.subtract(max_y, min_y), axis=-1) + + unpad_raster_size = (image_size - 1.0) - 2.0 * raster_padding + scaling = np.divide(unpad_raster_size, np.maximum(bbox_w, bbox_h)) # (N) + scaling_tile = np.tile(np.expand_dims(scaling, axis=-1), [1, seq_len]) # (N, seq_len) + abs_x = np.multiply(abs_x, scaling_tile) # (N, seq_len) + abs_y = np.multiply(abs_y, scaling_tile) + + # add padding + abs_x = np.add(abs_x, raster_padding) # (N, seq_len) + abs_y = np.add(abs_y, raster_padding) + + # transform to the middle + trans_x = np.divide(np.subtract(unpad_raster_size, np.multiply(bbox_w, scaling)), 2.0) # (N) + trans_y = np.divide(np.subtract(unpad_raster_size, np.multiply(bbox_h, scaling)), 2.0) + trans_x = np.tile(np.expand_dims(trans_x, axis=-1), [1, seq_len]) # (N, seq_len) + trans_y = np.tile(np.expand_dims(trans_y, axis=-1), [1, seq_len]) # (N, seq_len) + abs_x = np.add(abs_x, trans_x) # (N, seq_len) + abs_y = np.add(abs_y, trans_y) + + return abs_x, abs_y + + def normalize_strokes_np(self, strokes_list, image_size): + """ + + :param strokes_list: list (N_sketches,), each with (N_points, 3) + :return: + """ + assert type(strokes_list) is list + + rst_list = [] + for i in range(len(strokes_list)): + strokes_data = strokes_list[i] # (N_points, 3) + norm_x, norm_y = self.normalize_coordinate_np(np.expand_dims(strokes_data[:, 0], axis=0), + np.expand_dims(strokes_data[:, 1], axis=0), + image_size) # (1, N_points) + norm_strokes_data = np.stack([norm_x[0], norm_y[0], strokes_data[:, 2]], axis=-1) # (N_points, 3) + rst_list.append(norm_strokes_data) + return rst_list + + def raster_func(self, input_data, image_size, stroke_width, is_bin=True, version='v2'): + """ + :param input_data: (N_sketches,), each with (N_points, 3) + :return: raster_image_array: list (N_sketches,), each with (raster_size, raster_size), 0-BG and 1-strokes + """ + norm_test_strokes3 = self.normalize_strokes_np(input_data, image_size) + if version == 'v1': + raster_image_array = self.load_sketch_images_on_the_fly(image_size, norm_test_strokes3, stroke_width, is_bin=is_bin) + else: + raster_image_array = self.load_sketch_images_on_the_fly_v2(image_size, norm_test_strokes3, stroke_width, is_bin=is_bin) + + return raster_image_array diff --git a/robot_painting/qmupd_vs/readme.md b/robot_painting/qmupd_vs/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..8a6295cf58bfda97b8400348690fbdb26baebd84 --- /dev/null +++ b/robot_painting/qmupd_vs/readme.md @@ -0,0 +1,156 @@ +# General Virtual Sketching Framework for Vector Line Art - SIGGRAPH 2021 + +[[Paper]](https://esslab.jp/publications/HaoranSIGRAPH2021.pdf) | [[Project Page]](https://markmohr.github.io/virtual_sketching/) | [[中文Readme]](/README_CN.md) | [[中文论文介绍]](https://blog.csdn.net/qq_33000225/article/details/118883153) + +This code is used for **line 
drawing vectorization**, **rough sketch simplification** and **photograph to vector line drawing**. + + +## Outline +- [Dependencies](#dependencies) +- [Testing with Trained Weights](#testing-with-trained-weights) +- [Training](#training) +- [Citation](#citation) + +## Dependencies + - [Tensorflow](https://www.tensorflow.org/) (1.12.0 <= version <= 1.15.0) + - [opencv](https://opencv.org/) == 3.4.2 + - [pillow](https://pillow.readthedocs.io/en/latest/index.html) == 6.2.0 + - [scipy](https://www.scipy.org/) == 1.5.2 + - [gizeh](https://github.com/Zulko/gizeh) == 0.1.11 + +## Testing with Trained Weights +### Model Preparation + +Download the models [here](https://drive.google.com/drive/folders/1-hi2cl8joZ6oMOp4yvk_hObJGAK6ELHB?usp=sharing): + - `pretrain_clean_line_drawings` (105 MB): for vectorization + - `pretrain_rough_sketches` (105 MB): for rough sketch simplification + - `pretrain_faces` (105 MB): for photograph to line drawing + +Then, place them in this file structure: +``` +outputs/ + snapshot/ + pretrain_clean_line_drawings/ + pretrain_rough_sketches/ + pretrain_faces/ +``` + +### Usage +Choose an image from the `sample_inputs/` directory and run one of the following commands for the corresponding task. The results will be under `outputs/sampling/`. + +``` python +python3 test_vectorization.py --input muten.png + +python3 test_rough_sketch_simplification.py --input rocket.png + +python3 test_photograph_to_line.py --input 1390.png +``` + +**Note!!!** Our approach starts drawing from a randomly selected initial position, so it outputs different results in every testing trial (some might be fine and some might not be good enough). It is recommended to do several trials and select the visually best result. The number of outputs can be defined by the `--sample` argument: + +``` python +python3 test_vectorization.py --input muten.png --sample 10 + +python3 test_rough_sketch_simplification.py --input rocket.png --sample 10 + +python3 test_photograph_to_line.py --input 1390.png --sample 10 +``` + +**Reproducing Paper Figures:** our results (download from [here](https://drive.google.com/drive/folders/1-hi2cl8joZ6oMOp4yvk_hObJGAK6ELHB?usp=sharing)) were selected from a number of trials; reproducing them exactly therefore requires the same initial drawing positions. + +### Additional Tools + +#### a) Visualization + +Our vector output is stored in a `npz` package. Run the following command to obtain the rendered output and the drawing order. Results will be under the same directory as the `npz` file. +``` python +python3 tools/visualize_drawing.py --file path/to/the/result.npz +``` + +#### b) GIF Making + +To see the dynamic drawing procedure, run the following command to obtain the `gif`. The result will be under the same directory as the `npz` file. +``` python +python3 tools/gif_making.py --file path/to/the/result.npz +``` + + +#### c) Conversion to SVG + +Our vector output in a `npz` package is stored as Eq.(1) in the main paper. Run the following command to convert it to the `svg` format. The result will be under the same directory as the `npz` file.
+ +``` python +python3 tools/svg_conversion.py --file path/to/the/result.npz +``` + - The conversion is implemented in two modes (by setting the `--svg_type` argument): + - `single` (default): each stroke (a single segment) forms a path in the SVG file + - `cluster`: each continuous curve (with multiple strokes) forms a path in the SVG file + +**Important Notes** + +In SVG format, all the segments on a path share the same *stroke-width*. While in our stroke design, strokes on a common curve have different widths. Inside a stroke (a single segment), the thickness also changes linearly from an endpoint to another. +Therefore, neither of the two conversion methods above generate visually the same results as the ones in our paper. +*(Please mention this issue in your paper if you do qualitative comparisons with our results in SVG format.)* + + +
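+To make the width limitation concrete, below is a minimal illustrative sketch of what the `single` mode amounts to: every segment becomes its own `<path>` with one constant `stroke-width`, so width can vary between segments but not inside a segment. This is a stand-in, not the repository's `tools/svg_conversion.py`, and the `segments` data is hypothetical:
+
+``` python
+# One <path> per segment, each with its own constant stroke-width; the
+# linear taper inside a segment has no direct SVG equivalent.
+segments = [
+    (10.0, 10.0, 40.0, 30.0, 2.0),   # (x0, y0, x1, y1, width) -- hypothetical data
+    (40.0, 30.0, 80.0, 35.0, 3.5),
+]
+
+paths = [
+    '<path d="M {:.1f} {:.1f} L {:.1f} {:.1f}" stroke="black" '
+    'stroke-width="{:.1f}" fill="none" stroke-linecap="round"/>'.format(*seg)
+    for seg in segments
+]
+svg = ('<svg xmlns="http://www.w3.org/2000/svg" width="128" height="128">\n'
+       + '\n'.join(paths) + '\n</svg>')
+
+with open('single_mode_sketch.svg', 'w') as f:
+    f.write(svg)
+```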
+ +## Training + +### Preparations + +Download the models [here](https://drive.google.com/drive/folders/1-hi2cl8joZ6oMOp4yvk_hObJGAK6ELHB?usp=sharing): + - `pretrain_neural_renderer` (40 MB): the pre-trained neural renderer + - `pretrain_perceptual_model` (691 MB): the pre-trained perceptual model for raster loss + +Download the datasets [here](https://drive.google.com/drive/folders/1-hi2cl8joZ6oMOp4yvk_hObJGAK6ELHB?usp=sharing): + - `QuickDraw-clean` (14 MB): for clean line drawing vectorization. Taken from [QuickDraw](https://github.com/googlecreativelab/quickdraw-dataset) dataset. + - `QuickDraw-rough` (361 MB): for rough sketch simplification. Synthesized by the pencil drawing generation method from [Sketch Simplification](https://github.com/bobbens/sketch_simplification#pencil-drawing-generation). + - `CelebAMask-faces` (370 MB): for photograph to line drawing. Processed from the [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ) dataset. + +Then, place them in this file structure: +``` +datasets/ + QuickDraw-clean/ + QuickDraw-rough/ + CelebAMask-faces/ +outputs/ + snapshot/ + pretrain_neural_renderer/ + pretrain_perceptual_model/ +``` + +### Running + +It is recommended to train with multi-GPU. We train each task with 2 GPUs (each with 11 GB). + +``` python +python3 train_vectorization.py + +python3 train_rough_photograph.py --data rough + +python3 train_rough_photograph.py --data face +``` + +
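+If the machine exposes more GPUs than you want to use, the standard `CUDA_VISIBLE_DEVICES` environment variable is one common way to pin a run to two of them (this is a general CUDA convention, not a flag of these scripts):
+
+``` python
+CUDA_VISIBLE_DEVICES=0,1 python3 train_vectorization.py
+```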
+
+## Citation
+
+If you use the code and models, please cite:
+
+```
+@article{mo2021virtualsketching,
+  title = {General Virtual Sketching Framework for Vector Line Art},
+  author = {Mo, Haoran and Simo-Serra, Edgar and Gao, Chengying and Zou, Changqing and Wang, Ruomei},
+  journal = {ACM Transactions on Graphics (Proceedings of ACM SIGGRAPH 2021)},
+  year = {2021},
+  volume = {40},
+  number = {4},
+  pages = {51:1--51:14}
+}
+```
+
+
diff --git a/robot_painting/qmupd_vs/requirements.txt b/robot_painting/qmupd_vs/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9aa8ebfd3f1ea2691b8c75694d3772b90d489838
--- /dev/null
+++ b/robot_painting/qmupd_vs/requirements.txt
@@ -0,0 +1,8 @@
+torch==1.2.0
+torchvision==0.4.0
+dominate==2.4.0
+visdom==0.1.8.9
+scipy==1.1.0
+numpy==1.16.4
+Pillow==6.2.1
+opencv-python==4.1.0.25
\ No newline at end of file
diff --git a/robot_painting/qmupd_vs/rnn.py b/robot_painting/qmupd_vs/rnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..533b0aa25e0be7424f74c766a228859a30754d27
--- /dev/null
+++ b/robot_painting/qmupd_vs/rnn.py
@@ -0,0 +1,499 @@
+# Copyright 2019 The Magenta Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""SketchRNN RNN definition."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+
+
+def orthogonal(shape):
+  """Orthogonal initializer."""
+  flat_shape = (shape[0], np.prod(shape[1:]))
+  a = np.random.normal(0.0, 1.0, flat_shape)
+  u, _, v = np.linalg.svd(a, full_matrices=False)
+  q = u if u.shape == flat_shape else v
+  return q.reshape(shape)
+
+
+def orthogonal_initializer(scale=1.0):
+  """Orthogonal initializer."""
+
+  def _initializer(shape, dtype=tf.float32,
+                   partition_info=None):  # pylint: disable=unused-argument
+    return tf.constant(orthogonal(shape) * scale, dtype)
+
+  return _initializer
+
+
+def lstm_ortho_initializer(scale=1.0):
+  """LSTM orthogonal initializer."""
+
+  def _initializer(shape, dtype=tf.float32,
+                   partition_info=None):  # pylint: disable=unused-argument
+    size_x = shape[0]
+    size_h = shape[1] // 4  # assumes lstm.
+    t = np.zeros(shape)
+    t[:, :size_h] = orthogonal([size_x, size_h]) * scale
+    t[:, size_h:size_h * 2] = orthogonal([size_x, size_h]) * scale
+    t[:, size_h * 2:size_h * 3] = orthogonal([size_x, size_h]) * scale
+    t[:, size_h * 3:] = orthogonal([size_x, size_h]) * scale
+    return tf.constant(t, dtype)
+
+  return _initializer
+
+
+class LSTMCell(tf.contrib.rnn.RNNCell):
+  """Vanilla LSTM cell.
+ + Uses ortho initializer, and also recurrent dropout without memory loss + (https://arxiv.org/abs/1603.05118) + """ + + def __init__(self, + num_units, + forget_bias=1.0, + use_recurrent_dropout=False, + dropout_keep_prob=0.9): + self.num_units = num_units + self.forget_bias = forget_bias + self.use_recurrent_dropout = use_recurrent_dropout + self.dropout_keep_prob = dropout_keep_prob + + @property + def state_size(self): + return 2 * self.num_units + + @property + def output_size(self): + return self.num_units + + def get_output(self, state): + unused_c, h = tf.split(state, 2, 1) + return h + + def __call__(self, x, state, scope=None): + with tf.variable_scope(scope or type(self).__name__): + c, h = tf.split(state, 2, 1) + + x_size = x.get_shape().as_list()[1] + + w_init = None # uniform + + h_init = lstm_ortho_initializer(1.0) + + # Keep W_xh and W_hh separate here as well to use different init methods. + w_xh = tf.get_variable( + 'W_xh', [x_size, 4 * self.num_units], initializer=w_init) + w_hh = tf.get_variable( + 'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init) + bias = tf.get_variable( + 'bias', [4 * self.num_units], + initializer=tf.constant_initializer(0.0)) + + concat = tf.concat([x, h], 1) + w_full = tf.concat([w_xh, w_hh], 0) + hidden = tf.matmul(concat, w_full) + bias + + i, j, f, o = tf.split(hidden, 4, 1) + + if self.use_recurrent_dropout: + g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob) + else: + g = tf.tanh(j) + + new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g + new_h = tf.tanh(new_c) * tf.sigmoid(o) + + return new_h, tf.concat([new_c, new_h], 1) # fuk tuples. + + +def layer_norm_all(h, + batch_size, + base, + num_units, + scope='layer_norm', + reuse=False, + gamma_start=1.0, + epsilon=1e-3, + use_bias=True): + """Layer Norm (faster version, but not using defun).""" + # Performs layer norm on multiple base at once (ie, i, g, j, o for lstm) + # Reshapes h in to perform layer norm in parallel + h_reshape = tf.reshape(h, [batch_size, base, num_units]) + mean = tf.reduce_mean(h_reshape, [2], keep_dims=True) + var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True) + epsilon = tf.constant(epsilon) + rstd = tf.rsqrt(var + epsilon) + h_reshape = (h_reshape - mean) * rstd + # reshape back to original + h = tf.reshape(h_reshape, [batch_size, base * num_units]) + with tf.variable_scope(scope): + if reuse: + tf.get_variable_scope().reuse_variables() + gamma = tf.get_variable( + 'ln_gamma', [4 * num_units], + initializer=tf.constant_initializer(gamma_start)) + if use_bias: + beta = tf.get_variable( + 'ln_beta', [4 * num_units], initializer=tf.constant_initializer(0.0)) + if use_bias: + return gamma * h + beta + return gamma * h + + +def layer_norm(x, + num_units, + scope='layer_norm', + reuse=False, + gamma_start=1.0, + epsilon=1e-3, + use_bias=True): + """Calculate layer norm.""" + axes = [1] + mean = tf.reduce_mean(x, axes, keep_dims=True) + x_shifted = x - mean + var = tf.reduce_mean(tf.square(x_shifted), axes, keep_dims=True) + inv_std = tf.rsqrt(var + epsilon) + with tf.variable_scope(scope): + if reuse: + tf.get_variable_scope().reuse_variables() + gamma = tf.get_variable( + 'ln_gamma', [num_units], + initializer=tf.constant_initializer(gamma_start)) + if use_bias: + beta = tf.get_variable( + 'ln_beta', [num_units], initializer=tf.constant_initializer(0.0)) + output = gamma * (x_shifted) * inv_std + if use_bias: + output += beta + return output + + +def raw_layer_norm(x, epsilon=1e-3): + axes = [1] + mean = 
tf.reduce_mean(x, axes, keep_dims=True) + std = tf.sqrt( + tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True) + epsilon) + output = (x - mean) / (std) + return output + + +def super_linear(x, + output_size, + scope=None, + reuse=False, + init_w='ortho', + weight_start=0.0, + use_bias=True, + bias_start=0.0, + input_size=None): + """Performs linear operation. Uses ortho init defined earlier.""" + shape = x.get_shape().as_list() + with tf.variable_scope(scope or 'linear'): + if reuse: + tf.get_variable_scope().reuse_variables() + + w_init = None # uniform + if input_size is None: + x_size = shape[1] + else: + x_size = input_size + if init_w == 'zeros': + w_init = tf.constant_initializer(0.0) + elif init_w == 'constant': + w_init = tf.constant_initializer(weight_start) + elif init_w == 'gaussian': + w_init = tf.random_normal_initializer(stddev=weight_start) + elif init_w == 'ortho': + w_init = lstm_ortho_initializer(1.0) + + w = tf.get_variable( + 'super_linear_w', [x_size, output_size], tf.float32, initializer=w_init) + if use_bias: + b = tf.get_variable( + 'super_linear_b', [output_size], + tf.float32, + initializer=tf.constant_initializer(bias_start)) + return tf.matmul(x, w) + b + return tf.matmul(x, w) + + +class LayerNormLSTMCell(tf.contrib.rnn.RNNCell): + """Layer-Norm, with Ortho Init. and Recurrent Dropout without Memory Loss. + + https://arxiv.org/abs/1607.06450 - Layer Norm + https://arxiv.org/abs/1603.05118 - Recurrent Dropout without Memory Loss + """ + + def __init__(self, + num_units, + forget_bias=1.0, + use_recurrent_dropout=False, + dropout_keep_prob=0.90): + """Initialize the Layer Norm LSTM cell. + + Args: + num_units: int, The number of units in the LSTM cell. + forget_bias: float, The bias added to forget gates (default 1.0). + use_recurrent_dropout: Whether to use Recurrent Dropout (default False) + dropout_keep_prob: float, dropout keep probability (default 0.90) + """ + self.num_units = num_units + self.forget_bias = forget_bias + self.use_recurrent_dropout = use_recurrent_dropout + self.dropout_keep_prob = dropout_keep_prob + + @property + def input_size(self): + return self.num_units + + @property + def output_size(self): + return self.num_units + + @property + def state_size(self): + return 2 * self.num_units + + def get_output(self, state): + h, unused_c = tf.split(state, 2, 1) + return h + + def __call__(self, x, state, timestep=0, scope=None): + with tf.variable_scope(scope or type(self).__name__): + h, c = tf.split(state, 2, 1) + + h_size = self.num_units + x_size = x.get_shape().as_list()[1] + batch_size = x.get_shape().as_list()[0] + + w_init = None # uniform + + h_init = lstm_ortho_initializer(1.0) + + w_xh = tf.get_variable( + 'W_xh', [x_size, 4 * self.num_units], initializer=w_init) + w_hh = tf.get_variable( + 'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init) + + concat = tf.concat([x, h], 1) # concat for speed. + w_full = tf.concat([w_xh, w_hh], 0) + concat = tf.matmul(concat, w_full) # + bias # live life without garbage. 
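+
+      # Note: no bias is added to the fused matmul above; layer_norm_all
+      # below learns a per-gate shift ('ln_beta') that serves as the bias.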
+ + # i = input_gate, j = new_input, f = forget_gate, o = output_gate + concat = layer_norm_all(concat, batch_size, 4, h_size, 'ln_all') + i, j, f, o = tf.split(concat, 4, 1) + + if self.use_recurrent_dropout: + g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob) + else: + g = tf.tanh(j) + + new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g + new_h = tf.tanh(layer_norm(new_c, h_size, 'ln_c')) * tf.sigmoid(o) + + return new_h, tf.concat([new_h, new_c], 1) + + +class HyperLSTMCell(tf.contrib.rnn.RNNCell): + """HyperLSTM with Ortho Init, Layer Norm, Recurrent Dropout, no Memory Loss. + + https://arxiv.org/abs/1609.09106 + http://blog.otoro.net/2016/09/28/hyper-networks/ + """ + + def __init__(self, + num_units, + forget_bias=1.0, + use_recurrent_dropout=False, + dropout_keep_prob=0.90, + use_layer_norm=True, + hyper_num_units=256, + hyper_embedding_size=32, + hyper_use_recurrent_dropout=False): + """Initialize the Layer Norm HyperLSTM cell. + + Args: + num_units: int, The number of units in the LSTM cell. + forget_bias: float, The bias added to forget gates (default 1.0). + use_recurrent_dropout: Whether to use Recurrent Dropout (default False) + dropout_keep_prob: float, dropout keep probability (default 0.90) + use_layer_norm: boolean. (default True) + Controls whether we use LayerNorm layers in main LSTM & HyperLSTM cell. + hyper_num_units: int, number of units in HyperLSTM cell. + (default is 128, recommend experimenting with 256 for larger tasks) + hyper_embedding_size: int, size of signals emitted from HyperLSTM cell. + (default is 16, recommend trying larger values for large datasets) + hyper_use_recurrent_dropout: boolean. (default False) + Controls whether HyperLSTM cell also uses recurrent dropout. + Recommend turning this on only if hyper_num_units becomes large (>= 512) + """ + self.num_units = num_units + self.forget_bias = forget_bias + self.use_recurrent_dropout = use_recurrent_dropout + self.dropout_keep_prob = dropout_keep_prob + self.use_layer_norm = use_layer_norm + self.hyper_num_units = hyper_num_units + self.hyper_embedding_size = hyper_embedding_size + self.hyper_use_recurrent_dropout = hyper_use_recurrent_dropout + + self.total_num_units = self.num_units + self.hyper_num_units + + if self.use_layer_norm: + cell_fn = LayerNormLSTMCell + else: + cell_fn = LSTMCell + self.hyper_cell = cell_fn( + hyper_num_units, + use_recurrent_dropout=hyper_use_recurrent_dropout, + dropout_keep_prob=dropout_keep_prob) + + @property + def input_size(self): + return self._input_size + + @property + def output_size(self): + return self.num_units + + @property + def state_size(self): + return 2 * self.total_num_units + + def get_output(self, state): + total_h, unused_total_c = tf.split(state, 2, 1) + h = total_h[:, 0:self.num_units] + return h + + def hyper_norm(self, layer, scope='hyper', use_bias=True): + num_units = self.num_units + embedding_size = self.hyper_embedding_size + # recurrent batch norm init trick (https://arxiv.org/abs/1603.09025). + init_gamma = 0.10 # cooijmans' da man. 
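+    # The projections below follow the hypernetwork scheme: the HyperLSTM
+    # output is first embedded (size hyper_embedding_size), then mapped to a
+    # per-unit scale alpha (and shift beta) applied to this gate slice.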
+ with tf.variable_scope(scope): + zw = super_linear( + self.hyper_output, + embedding_size, + init_w='constant', + weight_start=0.00, + use_bias=True, + bias_start=1.0, + scope='zw') + alpha = super_linear( + zw, + num_units, + init_w='constant', + weight_start=init_gamma / embedding_size, + use_bias=False, + scope='alpha') + result = tf.multiply(alpha, layer) + if use_bias: + zb = super_linear( + self.hyper_output, + embedding_size, + init_w='gaussian', + weight_start=0.01, + use_bias=False, + bias_start=0.0, + scope='zb') + beta = super_linear( + zb, + num_units, + init_w='constant', + weight_start=0.00, + use_bias=False, + scope='beta') + result += beta + return result + + def __call__(self, x, state, timestep=0, scope=None): + with tf.variable_scope(scope or type(self).__name__): + total_h, total_c = tf.split(state, 2, 1) + h = total_h[:, 0:self.num_units] + c = total_c[:, 0:self.num_units] + self.hyper_state = tf.concat( + [total_h[:, self.num_units:], total_c[:, self.num_units:]], 1) + + batch_size = x.get_shape().as_list()[0] + x_size = x.get_shape().as_list()[1] + self._input_size = x_size + + w_init = None # uniform + + h_init = lstm_ortho_initializer(1.0) + + w_xh = tf.get_variable( + 'W_xh', [x_size, 4 * self.num_units], initializer=w_init) + w_hh = tf.get_variable( + 'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init) + bias = tf.get_variable( + 'bias', [4 * self.num_units], + initializer=tf.constant_initializer(0.0)) + + # concatenate the input and hidden states for hyperlstm input + hyper_input = tf.concat([x, h], 1) + hyper_output, hyper_new_state = self.hyper_cell(hyper_input, + self.hyper_state) + self.hyper_output = hyper_output + self.hyper_state = hyper_new_state + + xh = tf.matmul(x, w_xh) + hh = tf.matmul(h, w_hh) + + # split Wxh contributions + ix, jx, fx, ox = tf.split(xh, 4, 1) + ix = self.hyper_norm(ix, 'hyper_ix', use_bias=False) + jx = self.hyper_norm(jx, 'hyper_jx', use_bias=False) + fx = self.hyper_norm(fx, 'hyper_fx', use_bias=False) + ox = self.hyper_norm(ox, 'hyper_ox', use_bias=False) + + # split Whh contributions + ih, jh, fh, oh = tf.split(hh, 4, 1) + ih = self.hyper_norm(ih, 'hyper_ih', use_bias=True) + jh = self.hyper_norm(jh, 'hyper_jh', use_bias=True) + fh = self.hyper_norm(fh, 'hyper_fh', use_bias=True) + oh = self.hyper_norm(oh, 'hyper_oh', use_bias=True) + + # split bias + ib, jb, fb, ob = tf.split(bias, 4, 0) # bias is to be broadcasted. 
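+
+      # Each gate pre-activation below sums three independently modulated
+      # terms: the input contribution (ix..ox), the hidden contribution
+      # (ih..oh), and the shared bias (ib..ob).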
+ + # i = input_gate, j = new_input, f = forget_gate, o = output_gate + i = ix + ih + ib + j = jx + jh + jb + f = fx + fh + fb + o = ox + oh + ob + + if self.use_layer_norm: + concat = tf.concat([i, j, f, o], 1) + concat = layer_norm_all(concat, batch_size, 4, self.num_units, 'ln_all') + i, j, f, o = tf.split(concat, 4, 1) + + if self.use_recurrent_dropout: + g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob) + else: + g = tf.tanh(j) + + new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g + new_h = tf.tanh(layer_norm(new_c, self.num_units, 'ln_c')) * tf.sigmoid(o) + + hyper_h, hyper_c = tf.split(hyper_new_state, 2, 1) + new_total_h = tf.concat([new_h, hyper_h], 1) + new_total_c = tf.concat([new_c, hyper_c], 1) + new_total_state = tf.concat([new_total_h, new_total_c], 1) + return new_h, new_total_state diff --git a/robot_painting/qmupd_vs/subnet_tf_utils.py b/robot_painting/qmupd_vs/subnet_tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4c4726aac4c02e40ccde4b032f592fe042e0b3e4 --- /dev/null +++ b/robot_painting/qmupd_vs/subnet_tf_utils.py @@ -0,0 +1,989 @@ +import tensorflow as tf + + +def get_initializer(init_method): + if init_method == 'xavier_normal': + initializer = tf.glorot_normal_initializer() + elif init_method == 'xavier_uniform': + initializer = tf.glorot_uniform_initializer() + elif init_method == 'he_normal': + initializer = tf.keras.initializers.he_normal() + elif init_method == 'he_uniform': + initializer = tf.keras.initializers.he_uniform() + elif init_method == 'lecun_normal': + initializer = tf.keras.initializers.lecun_normal() + elif init_method == 'lecun_uniform': + initializer = tf.keras.initializers.lecun_uniform() + else: + raise Exception('Unknown initializer:', init_method) + return initializer + + +def lrelu(x, leak=0.2, name="lrelu", alt_relu_impl=False): + with tf.variable_scope(name) as scope: + if alt_relu_impl: + f1 = 0.5 * (1 + leak) + f2 = 0.5 * (1 - leak) + return f1 * x + f2 * abs(x) + else: + return tf.maximum(x, leak * x) + + +def batchnorm(input, name='batch_norm', init_method=None): + if init_method is not None: + initializer = get_initializer(init_method) + else: + initializer = tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32) + + with tf.variable_scope(name): + # this block looks like it has 3 inputs on the graph unless we do this + input = tf.identity(input) + + channels = input.get_shape()[3] + offset = tf.get_variable("offset", [channels], dtype=tf.float32, initializer=tf.zeros_initializer()) + scale = tf.get_variable("scale", [channels], dtype=tf.float32, + initializer=initializer) + mean, variance = tf.nn.moments(input, axes=[0, 1, 2], keep_dims=False) + variance_epsilon = 1e-5 + normalized = tf.nn.batch_normalization(input, mean, variance, offset, scale, variance_epsilon=variance_epsilon) + return normalized + + +def layernorm(input, name='layer_norm', init_method=None): + if init_method is not None: + initializer = get_initializer(init_method) + else: + initializer = tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32) + + with tf.variable_scope(name): + n_neurons = input.get_shape()[3] + offset = tf.get_variable("offset", [n_neurons], dtype=tf.float32, initializer=tf.zeros_initializer()) + scale = tf.get_variable("scale", [n_neurons], dtype=tf.float32, + initializer=initializer) + offset = tf.reshape(offset, [1, 1, -1]) + scale = tf.reshape(scale, [1, 1, -1]) + mean, variance = tf.nn.moments(input, axes=[1, 2, 3], keep_dims=True) + variance_epsilon = 1e-5 + normalized = 
tf.nn.batch_normalization(input, mean, variance, offset, scale, variance_epsilon=variance_epsilon) + return normalized + + +def instance_norm(input, name="instance_norm", init_method=None): + if init_method is not None: + initializer = get_initializer(init_method) + else: + initializer = tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32) + + with tf.variable_scope(name): + depth = input.get_shape()[3] + scale = tf.get_variable("scale", [depth], initializer=initializer) + offset = tf.get_variable("offset", [depth], initializer=tf.constant_initializer(0.0)) + mean, variance = tf.nn.moments(input, axes=[1, 2], keep_dims=True) + epsilon = 1e-5 + inv = tf.rsqrt(variance + epsilon) + normalized = (input - mean) * inv + return scale * normalized + offset + + +def linear1d(inputlin, inputdim, outputdim, name="linear1d", init_method=None): + if init_method is not None: + initializer = get_initializer(init_method) + else: + initializer = None + + with tf.variable_scope(name) as scope: + weight = tf.get_variable("weight", [inputdim, outputdim], initializer=initializer) + bias = tf.get_variable("bias", [outputdim], dtype=tf.float32, initializer=tf.constant_initializer(0.0)) + return tf.matmul(inputlin, weight) + bias + + +def general_conv2d(inputconv, output_dim=64, filter_height=4, filter_width=4, stride_height=2, stride_width=2, + stddev=0.02, padding="SAME", name="conv2d", do_norm=True, norm_type='instance_norm', do_relu=True, + relufactor=0, is_training=True, init_method=None): + if init_method is not None: + initializer = get_initializer(init_method) + else: + initializer = tf.truncated_normal_initializer(stddev=stddev) + + with tf.variable_scope(name) as scope: + conv = tf.contrib.layers.conv2d(inputconv, output_dim, [filter_width, filter_height], + [stride_width, stride_height], padding, activation_fn=None, + weights_initializer=initializer, + biases_initializer=tf.constant_initializer(0.0)) + if do_norm: + if norm_type == 'instance_norm': + conv = instance_norm(conv, init_method=init_method) + # conv = tf.contrib.layers.instance_norm(conv, epsilon=1e-05, center=True, scale=True, + # scope='instance_norm') + elif norm_type == 'batch_norm': + # conv = batchnorm(conv, init_method=init_method) + conv = tf.contrib.layers.batch_norm(conv, decay=0.9, is_training=is_training, updates_collections=None, + epsilon=1e-5, center=True, scale=True, scope="batch_norm") + elif norm_type == 'layer_norm': + # conv = layernorm(conv, init_method=init_method) + conv = tf.contrib.layers.layer_norm(conv, center=True, scale=True, scope='layer_norm') + + if do_relu: + if relufactor == 0: + conv = tf.nn.relu(conv, "relu") + else: + conv = lrelu(conv, relufactor, "lrelu") + + return conv + + +def generative_cnn_c3_encoder(inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + tensor_x = inputs + + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope: + tensor_x = general_conv2d(tensor_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", 
init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + tensor_x_sp = tensor_x # [N, h, w, 256] + + tensor_x = tf.reshape(tensor_x, (-1, 256 * 4 * 4)) + tensor_x = linear1d(tensor_x, 256 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def generative_cnn_c3_encoder_deeper(inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + tensor_x = inputs + + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope: + tensor_x = general_conv2d(tensor_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + tensor_x_sp = tensor_x # [N, h, w, 512] + + tensor_x = tf.reshape(tensor_x, (-1, 512 * 4 * 4)) + tensor_x = linear1d(tensor_x, 512 * 4 * 4, 512, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def generative_cnn_c3_encoder_combine33(local_inputs, global_inputs, is_training=True, 
drop_keep_prob=0.5, init_method=None): + local_x = local_inputs + global_x = global_inputs + + with tf.variable_scope('Local_Encoder', reuse=tf.AUTO_REUSE): + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + with tf.variable_scope('Global_Encoder', reuse=tf.AUTO_REUSE): + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + tensor_x = tf.concat([local_x, global_x], axis=-1) + + with tf.variable_scope('Combined_Encoder', reuse=tf.AUTO_REUSE): + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, 
filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + tensor_x_sp = tensor_x # [N, h, w, 256] + + tensor_x = tf.reshape(tensor_x, (-1, 512 * 4 * 4)) + tensor_x = linear1d(tensor_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def generative_cnn_c3_encoder_combine43(local_inputs, global_inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + local_x = local_inputs + global_x = global_inputs + + with tf.variable_scope('Local_Encoder', reuse=tf.AUTO_REUSE): + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + with tf.variable_scope('Global_Encoder', reuse=tf.AUTO_REUSE): + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + 
global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + tensor_x = tf.concat([local_x, global_x], axis=-1) + + with tf.variable_scope('Combined_Encoder', reuse=tf.AUTO_REUSE): + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + tensor_x_sp = tensor_x # [N, h, w, 256] + + tensor_x = tf.reshape(tensor_x, (-1, 512 * 4 * 4)) + tensor_x = linear1d(tensor_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def generative_cnn_c3_encoder_combine53(local_inputs, global_inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + local_x = local_inputs + global_x = global_inputs + + with tf.variable_scope('Local_Encoder', reuse=tf.AUTO_REUSE): + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + local_x = general_conv2d(local_x, 512, filter_height=3, 
filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + + with tf.variable_scope('Global_Encoder', reuse=tf.AUTO_REUSE): + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + + tensor_x = tf.concat([local_x, global_x], axis=-1) + + with tf.variable_scope('Combined_Encoder', reuse=tf.AUTO_REUSE): + tensor_x_sp = tensor_x # [N, h, w, 256] + tensor_x = tf.reshape(tensor_x, (-1, 1024 * 4 * 4)) + tensor_x = linear1d(tensor_x, 1024 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def generative_cnn_c3_encoder_combineFC(local_inputs, global_inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + local_x = local_inputs + global_x = global_inputs + + with tf.variable_scope('Local_Encoder', reuse=tf.AUTO_REUSE): + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + local_x = 
general_conv2d(local_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + + local_x = tf.reshape(local_x, (-1, 512 * 4 * 4)) + local_x = linear1d(local_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + local_x = tf.nn.dropout(local_x, drop_keep_prob) + + with tf.variable_scope('Global_Encoder', reuse=tf.AUTO_REUSE): + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, 
name="CNN_ENC_4_2", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + + global_x = tf.reshape(global_x, (-1, 512 * 4 * 4)) + global_x = linear1d(global_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + global_x = tf.nn.dropout(global_x, drop_keep_prob) + + tensor_x_sp = None + tensor_x = tf.concat([local_x, global_x], axis=-1) + return tensor_x, tensor_x_sp + + +def generative_cnn_c3_encoder_combineFC_jointAttn(local_inputs, global_inputs, is_training=True, drop_keep_prob=0.5, + init_method=None, combine_manner='attn'): + local_x = local_inputs + global_x = global_inputs + + with tf.variable_scope('Local_Encoder', reuse=tf.AUTO_REUSE): + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + share_x = local_x + + with tf.variable_scope('Attn_branch', reuse=tf.AUTO_REUSE): + attn_x = general_conv2d(share_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + attn_x = general_conv2d(attn_x, 32, filter_height=1, filter_width=1, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + attn_x = general_conv2d(attn_x, 1, filter_height=1, filter_width=1, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + attn_map = tf.nn.sigmoid(attn_x) # (N, H/8, W/8, 1), [0.0, 1.0] + + if combine_manner == 'attn': + attn_feat = attn_map * share_x + share_x + else: + raise Exception('Unknown combine_manner', combine_manner) + + local_x = general_conv2d(attn_feat, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, 
stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + + local_x = tf.reshape(local_x, (-1, 512 * 4 * 4)) + local_x = linear1d(local_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + local_x = tf.nn.dropout(local_x, drop_keep_prob) + + with tf.variable_scope('Global_Encoder', reuse=tf.AUTO_REUSE): + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + + global_x = tf.reshape(global_x, (-1, 512 * 4 * 4)) + global_x = linear1d(global_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + global_x = tf.nn.dropout(global_x, drop_keep_prob) + + tensor_x_sp = None + tensor_x = tf.concat([local_x, global_x], axis=-1) + return tensor_x, 
tensor_x_sp, attn_map + + +def generative_cnn_c3_encoder_combineFC_sepAttn(local_inputs, global_inputs, is_training=True, drop_keep_prob=0.5, + init_method=None, combine_manner='attn'): + local_x = local_inputs + global_x = global_inputs + + with tf.variable_scope('Attn_branch', reuse=tf.AUTO_REUSE): + attn_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + attn_x = general_conv2d(attn_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + attn_x = general_conv2d(attn_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + attn_x = general_conv2d(attn_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + attn_x = general_conv2d(attn_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + attn_x = general_conv2d(attn_x, 32, filter_height=1, filter_width=1, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + attn_x = general_conv2d(attn_x, 1, filter_height=1, filter_width=1, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + attn_map = tf.nn.sigmoid(attn_x) # (N, H/8, W/8, 1), [0.0, 1.0] + + with tf.variable_scope('Local_Encoder', reuse=tf.AUTO_REUSE): + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + local_x = general_conv2d(local_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + local_x = general_conv2d(local_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + local_x = general_conv2d(local_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + if combine_manner == 'attn': + attn_feat = attn_map * local_x + local_x + elif combine_manner == 'channel': + attn_feat = tf.concat([local_x, attn_map], axis=-1) + else: + raise Exception('Unknown combine_manner', combine_manner) + + local_x = general_conv2d(attn_feat, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + local_x = general_conv2d(local_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, + 
is_training=is_training, name="CNN_ENC_5", init_method=init_method) + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + local_x = general_conv2d(local_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + + local_x = tf.reshape(local_x, (-1, 512 * 4 * 4)) + local_x = linear1d(local_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + local_x = tf.nn.dropout(local_x, drop_keep_prob) + + with tf.variable_scope('Global_Encoder', reuse=tf.AUTO_REUSE): + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + global_x = general_conv2d(global_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + global_x = general_conv2d(global_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + global_x = general_conv2d(global_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + global_x = general_conv2d(global_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + global_x = general_conv2d(global_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + + global_x = tf.reshape(global_x, (-1, 512 * 4 * 4)) + global_x = linear1d(global_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + global_x = tf.nn.dropout(global_x, drop_keep_prob) + + tensor_x_sp = None + tensor_x = tf.concat([local_x, global_x], axis=-1) + return tensor_x, tensor_x_sp, attn_map + + +def generative_cnn_c3_encoder_deeper13(inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + tensor_x = inputs + + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope: + tensor_x = general_conv2d(tensor_x, 32, filter_height=3, filter_width=3, + is_training=is_training, 
name="CNN_ENC_1", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + tensor_x_sp = tensor_x # [N, h, w, 256] + + tensor_x = tf.reshape(tensor_x, (-1, 512 * 4 * 4)) + tensor_x = linear1d(tensor_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def generative_cnn_c3_encoder_deeper13_attn(inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + tensor_x = inputs + + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope: + tensor_x = general_conv2d(tensor_x, 32, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_1", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 32, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_1_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 64, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_2_2", init_method=init_method) + + tensor_x = self_attention(tensor_x, 64) + + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_3", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + 
is_training=is_training, name="CNN_ENC_3_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_3_3", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_4", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_4_3", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, + is_training=is_training, name="CNN_ENC_5", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, filter_height=3, filter_width=3, stride_height=1, stride_width=1, + is_training=is_training, name="CNN_ENC_5_3", init_method=init_method) + tensor_x_sp = tensor_x # [N, h, w, 256] + + tensor_x = tf.reshape(tensor_x, (-1, 512 * 4 * 4)) + tensor_x = linear1d(tensor_x, 512 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def generative_cnn_encoder(inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + tensor_x = inputs + + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope: + tensor_x = general_conv2d(tensor_x, 32, is_training=is_training, name="CNN_ENC_1", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 32, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_1_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, is_training=is_training, name="CNN_ENC_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_2_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 128, is_training=is_training, name="CNN_ENC_3", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_3_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 256, is_training=is_training, name="CNN_ENC_4", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_4_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, is_training=is_training, name="CNN_ENC_5", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_5_2", init_method=init_method) + tensor_x_sp = tensor_x # [N, h, w, 256] + + tensor_x = tf.reshape(tensor_x, (-1, 256 * 4 * 4)) + tensor_x = linear1d(tensor_x, 256 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def generative_cnn_encoder_deeper(inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + tensor_x = inputs + + with tf.variable_scope(tf.get_variable_scope(), 
reuse=tf.AUTO_REUSE) as scope: + tensor_x = general_conv2d(tensor_x, 32, is_training=is_training, name="CNN_ENC_1", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 32, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_1_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, is_training=is_training, name="CNN_ENC_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_2_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 128, is_training=is_training, name="CNN_ENC_3", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_3_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 256, is_training=is_training, name="CNN_ENC_4", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_4_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, is_training=is_training, name="CNN_ENC_5", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 512, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_5_2", init_method=init_method) + tensor_x_sp = tensor_x # [N, h, w, 512] + + tensor_x = tf.reshape(tensor_x, (-1, 512 * 4 * 4)) + tensor_x = linear1d(tensor_x, 512 * 4 * 4, 512, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def generative_cnn_encoder_deeper13(inputs, is_training=True, drop_keep_prob=0.5, init_method=None): + tensor_x = inputs + + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope: + tensor_x = general_conv2d(tensor_x, 32, is_training=is_training, + name="CNN_ENC_1", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 32, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_1_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 64, is_training=is_training, + name="CNN_ENC_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_2_2", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 128, is_training=is_training, + name="CNN_ENC_3", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_3_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_3_3", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 256, is_training=is_training, + name="CNN_ENC_4", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_4_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_4_3", init_method=init_method) + + tensor_x = general_conv2d(tensor_x, 256, is_training=is_training, + name="CNN_ENC_5", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, stride_height=1, stride_width=1, is_training=is_training, + name="CNN_ENC_5_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 256, stride_height=1, stride_width=1, is_training=is_training, + 
name="CNN_ENC_5_3", init_method=init_method) + tensor_x_sp = tensor_x # [N, h, w, 256] + + tensor_x = tf.reshape(tensor_x, (-1, 256 * 4 * 4)) + tensor_x = linear1d(tensor_x, 256 * 4 * 4, 128, name='CNN_ENC_FC', init_method=init_method) + + if is_training: + tensor_x = tf.nn.dropout(tensor_x, drop_keep_prob) + + return tensor_x, tensor_x_sp + + +def max_pooling(x) : + return tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='SAME') + + +def hw_flatten(x) : + return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]]) + + +def self_attention(x, in_channel, name='self_attention'): + with tf.variable_scope(name) as scope: + f = general_conv2d(x, in_channel // 8, filter_height=1, filter_width=1, stride_height=1, stride_width=1, + do_norm=False, do_relu=False, name='f_conv') # (N, h, w, c') + f = max_pooling(f) # (N, h', w', c') + g = general_conv2d(x, in_channel // 8, filter_height=1, filter_width=1, stride_height=1, stride_width=1, + do_norm=False, do_relu=False, name='g_conv') # (N, h, w, c') + h = general_conv2d(x, in_channel, filter_height=1, filter_width=1, stride_height=1, stride_width=1, + do_norm=False, do_relu=False, name='h_conv') # (N, h, w, c) + h = max_pooling(h) # (N, h', w', c) + + # M = h * w, M' = h' * w' + s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True) # (N, M, M') + beta = tf.nn.softmax(s) # attention map + + o = tf.matmul(beta, hw_flatten(h)) # (N, M, c) + gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0)) + + o = tf.reshape(o, shape=x.shape) # (N, h, w, c) + o = general_conv2d(o, in_channel, filter_height=1, filter_width=1, stride_height=1, stride_width=1, + do_norm=False, do_relu=False, name='attn_conv') + + x = gamma * o + x + + return x + + +def global_avg_pooling(x): + gap = tf.reduce_mean(x, axis=[1, 2]) + return gap + + +def cnn_discriminator_wgan_gp(discrim_inputs, discrim_targets, init_method=None): + tensor_x = tf.concat([discrim_inputs, discrim_targets], axis=3) # (N, H, W, 3 + 1) + + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope: + tensor_x = general_conv2d(tensor_x, 32, filter_height=3, filter_width=3, + is_training=True, name="CNN_ENC_1", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 64, filter_height=3, filter_width=3, + is_training=True, name="CNN_ENC_2", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, + is_training=True, name="CNN_ENC_3", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 128, filter_height=3, filter_width=3, + is_training=True, name="CNN_ENC_4", init_method=init_method) + tensor_x = general_conv2d(tensor_x, 1, filter_height=3, filter_width=3, + is_training=True, name="CNN_ENC_5", init_method=init_method) + # (N, H/32, W/32, 1) + + d_out = global_avg_pooling(tensor_x) # (N, 1) + + return d_out diff --git a/robot_painting/qmupd_vs/test.ipynb b/robot_painting/qmupd_vs/test.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..b9357d64c7e0b8eb838926a54ca43bf493a2c233 --- /dev/null +++ b/robot_painting/qmupd_vs/test.ipynb @@ -0,0 +1,613 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "84e63b7a58894e46b51e77dea3daf965", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "CameraStream(constraints={'facing_mode': 'user', 'audio': False, 'video': {'width': 512, 'height': 512, 'facin…" + ] + }, + "metadata": {}, + 
"output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "499191332bd54d10a7a08f6bd80ef7a0", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "ImageRecorder(image=Image(value=b''), stream=CameraStream(constraints={'facing_mode': 'user', 'audio': False, …" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from ipywebrtc import CameraStream, ImageRecorder\n", + "from IPython.display import display\n", + "import PIL.Image\n", + "import io\n", + "import numpy\n", + "import cv2\n", + "from ipywebrtc import CameraStream\n", + "camera = CameraStream.facing_user(audio=False, constraints={\n", + " 'facing_mode': 'user',\n", + " 'audio': False,\n", + " 'video': { 'width': 512, 'height': 512 }\n", + "})\n", + "display(camera)\n", + "recorder = ImageRecorder(stream=camera)\n", + "display(recorder)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAicAAADCCAYAAACSRmLFAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAADBJklEQVR4nOydd3hUZfq/7+kzyWTSe28EQiCU0EITkCoWEHQtYFt3ddW1Ytvvquuuva5rXXVxERVlsaKAVOkt1JCQhIT0nkmbTJ85vz9yzfkxJoEEqXru6+LSmXPmnDOT97zneZ/yeWSCIAhISEhISEhISFwgyM/3BUhISEhISEhInIhknEhISEhISEhcUEjGiYSEhISEhMQFhWScSEhISEhISFxQSMaJhISEhISExAWFZJxISEhISEhIXFBIxomEhISEhITEBYVknEhISEhISEhcUEjGiYSEhISEhMQFhWScSEhISEhISFxQnDfj5K233iIhIQGtVsuoUaPYvXv3+boUCYk+IY1diYsVaexKXCycF+Pk888/54EHHuDJJ59k3759ZGZmMn36dOrr68/H5UhI9Bpp7EpcrEhjV+JiQnY+Gv+NGjWKESNG8OabbwLgdruJjY3lnnvu4dFHHz3l591uN9XV1fj5+SGTyc725Ur8ShEEgfb2dqKiopDLe2enS2NX4kJAGrsSFyu9HbvKc3hNANjtdnJycnjsscfE9+RyOZdeeik7duzo9jM2mw2bzSa+rqqqIj09/axfq8Rvg4qKCmJiYk65nzR2JS40pLErcbFyqrF7zo2TxsZGXC4X4eHhXu+Hh4dz9OjRbj/z3HPP8be//a3L+xUVFRgMhrNynRK/ftra2oiNjcXPz69X+0tjV+JCQRq7Ehcr33zzDQsXLjzl2D3nxsnp8Nhjj/HAAw+Irz03psFgkG4SiV/M2XRRS2NX4mwijV2Jiw0fHx/g1GP3nBsnISEhKBQK6urqvN6vq6sjIiKi289oNBo0Gs25uDwJiR6Rxq7ExYo0diUuNs55tY5arWb48OGsX79efM/tdrN+/XrGjBlzri9HQqLXSGNX4mJFGrsSFxvnJazzwAMPcNNNN5GVlcXIkSN5/fXX6ejo4JZbbjkflyMh0WuksStxsSKNXYmLifNinFx77bU0NDTwxBNPUFtby5AhQ1i9enWXZC0JiQsNaexKXKxIY1fiYuK86Jz8Utra2vD396e1tVVKzJI4bc7HOJLGrsSZQBq7EhcrK1asYN68eaccR1JvHQkJCQkJCYkLCsk4kZCQkJCQkLigkIwTCQkJCQkJiQuKi0KETeLc4HA4OHbsGIWFhQQGBpKenk5wcLDUR0NCQkLiLCAIAnV1deTm5lJVVYVerycpKYm0tDRRrOy3imSc/MawWq0oFApUKhWCIGCxWMjPz2fTpk389NNPbNq0ifb2dtRqNWFhYVx33XXMmzePiooKhg0bRmJi4vn+ChISEhIXDVarlerqarRaLQEBATgcDmpraykpKaGoqAiZTEZ8fDzJycmYTCYOHDjAtm3biI2NpbW1lbS0NEaMGNHrBo+/FiTj5FeKIAg4HA6MRiM1NTWUlpaye/duVq9eTXBwMMnJybS3t7N//36qqqpob28HIDQ0lAkTJqBQKMjJyeHll1/m9ddfx8fHh4iICP7v//6Pfv36kZGR8Zu37CUkJCQ8CIKA0WjEbDZTVVWF0Wikvr6e5uZmWltbvRaFdrud/v37ExUVRUZGBocPH+bbb7/F6XSSlZVFamoqLS0tVFZW0tLSQm5uLmFhYQwcOJCEhITfhKEiGSe/IhwOBwUFBezcuZOVK1fS0NBAQUEBFosFs9nste+JSpHh4eFERkYybdo0XC6X2AgsNDSUqVOn8tlnnxEbG0t+fj4LFixAq9UyZMgQ7rvvPi677DL0ev05/Z4SEhISFwp1dXUcPHiQsrIympqa0Ol0hIWF4ePjQ0xMDK2treh0OqxWK1arlbi4OKxWK3V1dbS2trJmzRrkcjk+Pj4IgkBxcTE7d+4kOzsbq9WK2+0WDZUDBw4QHx/P2LFjSUxM/FUbKZJx8ivAZDKRk5PD66+/ztq1azGbzQiCgEqlIiQkhMGDB6NQKNi0aRNOp9Prs2PHjmXo0KGsWrWKlStXUlFRgcvlErdXVVXhdDrJzc0V37NarezcuZMbb7yRrKws7r//fubMmYNKpTpn31lCQkLifFJWVsa+ffvIz8/H5XKh0+nQaDQMHjyYsrIyamtraWtrQ61WExoaiiAI+Pr6UltbS2NjI/X19cTGxhIaGopGo0EmkyGXywkLC8NkMlFVVUVkZCTt7e3Y7Xbkcjl+fn7U1dWxZMkSUlNTueKKK361mjOScXIR43A42LFjB08//TRbt27FZrMBoFAoGDduHA888ABpaWk
cOnSITz75xMvoAIiKimL06NG89dZbWK3Wbs+RmJhIYGAghYWFXbYplUoSExO59dZbycvL4/7778ff3//Mf1EJCQmJC4S6ujp27NhBbm4uZrOZIUOGkJiYyJ49ezAajZSXlwOd87NnTtbr9YSEhLBt2zYUCgVtbW1ER0cDnQ0WPYvJ8PBwGhoakMvlNDU1YbFYiIiIwGKxoFAosNls2Gw2HA4HarWaFStWMGnSJBISEs7Xz3HWkIyTixCn08n27dt57bXX+PHHHzGbzajVapRKJYGBgTz66KNcf/31fP755zz88MMcO3asi2Gi1+uRyWT861//wm6393iu0tJSWltbu91ms9nQaDT84Q9/4IUXXiAnJ4dnn32WjIwMqcJHQkLiV0VLSwubNm0iLy8Ph8OB2+0WwzAFBQWEhYVxySWXcPjwYerr64HOhos6nY7AwEB27tyJWq3G399f9GDLZDJUKhUKhQKXy0VNTQ0ulwsfHx/8/PxobW2lurqaoKAgGhoaEAQBmUyGWq2mtLSUpKQk1q9fT3x8POPGjUOr1Z7Pn+iMIhknFxnNzc28+OKL/POf/xSt6WnTprFo0SI0Gg2RkZGEhoZyzz33sHTpUnrqTmCxWOjo6Ohxu4f6+voeE18FQWDJkiXMnTuXv/71r7z66qtMnDiRhx56iHvuuQc/P79f/H0lJCQkzjfFxcWsWrWKjo4OwsPDaWpqwul0iguxsLAwoqOj+fbbb2lpaUGhUOB2u8VQTUFBAS6XC7fbjVwuF/9FRUWhVqtpaWnB4XDQ1tYGdHq/9Xo9ra2tuN1umpqagE5vtWeh2dzczO7duxk/fjwmk4mPP/6YyZMn/2pyUSTj5CKivr6em266iTVr1iAIAqGhoTzyyCP88Y9/FJNSbTYbf/zjH09qmABdPCknw+12n3Tb//73P8rLy3nqqaf48MMP+cc//sG3337Lfffdx+zZs6WEWQkJiYuWwsJCvv76a4KCgsjIyGD//v0EBAQwffp04uPjgc5QzyeffILdbkelUiGXy9FoNCgUCpTKzsesp4zY6XQiCAIKhQKZTEZxcTHQ6c1OT0+nuLgYm81GVVWVeA0ej0lKSgrNzc3U19ej0WgA2LNnDyEhIURGRrJq1Sr8/f255JJLiI2NPce/1Jnl4jevfiPU19ezcOFCVq9ejUKhIC0tjU8++YQHH3zQ6+G/du1aPvvsMwRBwMfHRxzAP0ej0TBv3rxelQP3lI9yIrt37+bNN99k5syZPPbYY9TW1nLjjTdyzTXXcPDgwVN6aCQkJCQuNAoLC/nmm28IDAyksbGRzZs3k5qaysKFC0XDRBAENm3ahMvlYujQoTidTux2OyaTiba2NpqammhqaqKlpUV8z1NOXFFRgSAICIKAyWSiqKiIgIAAxowZg4+PD4GBgaIRIwgCRUVFBAcHExwcjFKpFMPnx44dY/PmzURGRpKQkMCyZcvYsGEDJpPpfP58vwjJOLkI8Bgma9asISUlheXLl7N9+3amTp3qtZ/b7ebTTz/FbreTkJDAnXfe2cX48LgS//Of//Dhhx8SFxcnbuvXrx9Tpkw57essKCjg+eefZ8mSJTzyyCMMHz6cVatWMX36dB555BFKSkokI0VCQuKi4ETDpLm5mbi4OObMmcPMmTO9cjtaWlqoqqoiJiaGPXv2UFtbi9vtFj0ogBjeSUlJYdq0aQiCgEajQa1W43a7sdvtCIKA2+2mtraWgwcPYrPZaG1t9fJyu1wu8vLy8PPz45JLLkEul6NWqxk2bBhZWVkcOnSI7du3k5iYiMlk4qOPPmLr1q1dpCQuBiTj5AKnqampi2Fy1VVXERQU1GXf0tJS1q1bx5w5c1i9ejXFxcU0NzcDnTfH1Vdfzbp169iwYQPXX389Go1GLENTq9W8+OKL/PGPfxT395S3RUREkJaWJt5onsTb7rj88svRarU8++yzzJs3j9tvvx2Hw8FLL71EdnY2jzzyCMePHz8bP5WEhITEGaGoqIivv/6awMBAjEYjY8aM4brrrmPQoEEoFAqvfXNzc/H19cVsNjN48GAiIyNRqVTExcWJya1paWlMmDCB6667jpCQENFQcTqdDBgwgMTERHQ6HQEBAchkMjo6OnA6nbhcLtFrIpPJxH8eUU2ZTIbL5SI3NxdBEBgwYABms1l8PX78ePLy8njvvffYvXv3RbU4lIyTCxi3280rr7zCjz/+KBomQ4YM6XH/oKAg/vOf/7BkyRIANm7cCHR6S66++mo++OADJk2aRFpaGtAZ2pk5cyYAqampTJ48mfz8fAAmTpzIunXrePvtt1m+fDm7du3ikUceQS6XM3DgQL7++muSkpK6VOXs3LlTzIF58skncTqdvPTSS/z+97+nra2Nl156iRkzZnDgwIEz/GtJSEhI/HLa29tZs2YNdrudhoYGxowZw7hx43qsQIyIiCAsLIxp06aJVZAxMTEUFxfj6+vLNddcw7XXXsvEiRNRKBRERUUREBBAcXExbrebgQMHYrVaGTt2LCaTiYEDBxIVFcWQIUOYN28ewcHBREVFodPpSE5ORi6XI5PJaG1tpX///gwaNAg/Pz+OHz+O3W4nIyMDt9vNxo0bxQRdpVJJTk4O27Ztu2gMFMk4uYDZvHkzb7/9NsnJyac0TKAz4cqTgPrZZ5+Jg/df//oXH374IQEBAV3KhrOyslAoFNxxxx0olUq+++47AK655hrGjRvHHXfcwbhx4/D39+f+++8nKiqKtLQ0xo0bx9q1a1m8eDGRkZHijVtfX8/TTz/N+PHjcbvdLF68mHvuuYfY2FiuvPJKoLNS6MYbb5Q8KBISEhccGzduJCUlhcjISMaNG3dSwwQ6F3Zz5swhPDyc3NxcAgICqKysJDIykmuuuYaYmBiqq6tFo0CtVhMUFERUVBSRkZFotVqcTicqlQqDwcDVV1/NbbfdxpVXXklGRgbZ2dnY7XaUSiUzZszg0ksvJTY2FoVCQV5eHrW1tQwdOhSNRsPhw4cpKysjIiJC9OC0tLQA0NbWxu7du9m/f/+5+Bl/MZJxcoHS3NzMokWLMBgMfPHFFz0aJm1tbd1W0yiVSm6++WZ+/PFH/vSnP2EwGGhsbOSZZ57x2i8jI4OwsDCmTJkiCgvJZDJ8fX27HFOn0+Hj48OVV16JXC4nKSmJm266iZ07d/Lcc88RGRkp1uOvXLmSAQMGAGA2m3nllVcYOnQoer2e/v37c9ttt4ky+RISEhIXAgUFBdTX19PW1kZqamq3hoknMfXnyaaCIBAeHo4gCFx22WXceuutxMXFkZ+fz44dO7z0ohITE0lOTiYlJYXDhw9jsVj46aefUKlUyGQyMQkWOufd0NBQwsLCCA4OZsyYMSxYsICpU6cSGhpKfX09hw4dIjw8HIPBQGtrK1arFYPBgN1ux2g0EhISgsvlQi6XU1tbe1F4TyTj5ALE7Xbz0ksvcfToUZ555hmGDh3a7X55eXncd9993VbTLFq0iP/85z9e5WTV1dWsW7cOi8UivqfVasUbYd++fVitVkJCQhg7dmyXYzY1NdHa2k
p4eLjX+3FxcTz88MPs3r2bd955B4VCQU1NjVfopq2tjU8++YSMjAw2bNjAnj17GDlyZF9/GgkJCYmzgslkYuPGjQQHByMIAqNHj+5imDgcDvbu3Ut5eTl1dXVe2zQaDWPHjuXmm29m2LBhYglxQ0MD/fr1w2g0ivuq1Wri4uKQyWRUVVWhVCqx2Wz079+/y3V5lGL9/PzE61EqlYwZM4ZbbrmFmTNnEhAQQHNzM+np6SiVStrb20WhuMDAQMrLy0U9lMTExItCJFMyTi4w3G43//73v/nnP//JrFmzuO6667rd74cffuCyyy5j4sSJ3ZYDe5JZT0QQBKqqqrwyt+12exfPyzXXXENSUlKXYzY0NGA0Glm+fDkOh8NrmyfOmp2dTUBAQLfXfPjwYWpra3G5XCxbtozbbrvN64aVkJCQOB9YLBZWrlxJeno65eXlTJkyBbVa7bWP2+3m+++/Z+PGjXR0dHQrGR8TE9NtCw9/f38xvAKd825VVZXowRg9ejQajYbhw4d3+WxTUxMajYaKigqxwMHzOa1Wy6hRoxg2bBgdHR1UVlaKxRLNzc0oFAp0Oh1qtZqKigr8/Pz44YcfKCkpOa3f6VwiGScXGHv27OHxxx9Hp9Px0EMPidb3iRQUFLBo0SIGDhzYo/HSHfv37yckJASdTie+d+jQIVGG3nPzDBo06KSW9ZIlS9i5c2e3hkpERASpqalA5+rglltuEbPbZTIZlZWVpKenExERwTfffMOtt94qGSgSEhLnDUEQ+Omnn/D396ejo4P+/fsTEhLSZZ+9e/eKyaUjRozo9fEbGhpYt26dl9FSWVnJTz/9hNvtxu1243A4CAgI6FGXymQyYTab+emnnzh+/DiffPKJl7Hj8ZB7jCDPAtRgMCCXyxkyZAiVlZW0t7cTEBDAV199dcEbKJJxcgFRV1fHQw89RHNzM7///e/Jysrqso/JZOKuu+6irq6Op59+GqvVyoYNGwC84ohut1tsOgWd9fHr168nKirKy9PicrmIjY0lMjJSLE1LSUnp9vqsViuCIIhdNpcsWcLChQvZtm2buI9MJhMNKo1Gw7333itWB4WEhDB+/Hhqa2uZO3cuMpmMb7/9lqeffrpLt2QJCQmJc0FhYaEY0i4uLmb8+PFd9vF0IPb19WXw4MG4XC5++OEHHA4Hra2t4txrMpnEvjoARqORqqoqWlpavDzK/v7+TJ06FR8fH5xOJ0qlEoPB0KVMGTp7qUVGRhIYGIivry/ff/+9OJ/D/1ePhc5wj9vtJjExUTyPw+Fg8+bNBAUFUVFRgd1uJywsjG+++UaUy78QkYyTCwSz2cwtt9zC1q1bSU5O5o477ujWe/Hxxx+Tk5PD66+/TmZmJs888wybNm2ioaGBxx9/XKzGKS8v5y9/+Yso4FNeXs7q1au7CLdBZ3KW1WqluroavV7fJafEw7p163A6naSkpJCUlITZbMZqtfLNN98gCAIrV65kw4YNXoaGXq8XlRRbW1sJCAjAaDQSERGBr68vgiDw/vvvs3PnToCTNiGUkJCQOJOUl5fz9ddfYzAYyM3NZeTIkV3C5E6nk7Vr1+Lj44NcLmfo0KGUlpbi4+NDQUEBW7ZsobKyEoAjR45w4MABsRfOkSNH6OjoQK/Xe3lOZDKZqGPi5+dHS0sLwcHBXa7PoyIrl8sxmUwkJSXhdDpJT0+noqKClpYWvv/+e1avXo3b7RaNm6CgIDGkExgYKBpHHR0dNDQ0MHjwYGw2Gxs3bkQQBCorKy+4BaJknFwAOBwOnn32WX788Uf8/Px49tlnu41nNjc3s2TJEv71r39xww034HQ6+fHHHxk+fDgvvPAC69atw2w2YzQayc/P5+OPPxb7M3z22WfI5XKuuOKKLsdNTk7GYrFQU1ODn5+f2Mr7RARB4NChQwCMHDkSlUolbtu5cycOh4OlS5cyd+5cr1I1hULBgw8+SHBwMDabja+//lrc5jG+zGYzTzzxBCaTiXvuuYctW7ac1u8oISEh0Vuam5v54Ycf8PX1xWKx0K9fP4YNG9Zlv2PHjhEQEIBWq2X27Nm0trYSGhpKU1MTW7Zsobq6mrKyMvLz88X/5ufn43A4OHjwIGq1WkxUPZG2tjZcLhcGg4Hm5uYuoSTo1FxxOp34+Pggk8lEAyYkJARBECgrK+PYsWP4+fmhUChEAyM0NJTMzEwqKysZN24cycnJNDQ0AODr68uuXbvw9/cnPz+fnJwcioqKyMnJOdM/8S9CMk4uAL7++mtefvllZDIZL7/8MvPnz+92P5fLxZNPPskNN9wgPtgDAwPp168fa9asISsriz179jBlyhR27txJW1sbzc3N2O12vv/+e/r160dUVFSXYwqCwPbt22lra2PAgAE9dhNOSUnBz8+PhQsXIpPJMBgM4goAEEWETgwnAUyZMoXf/e53gHfoKTQ0lLi4OPr378/OnTvF8NTixYsvilI3CQmJixOHw8GaNWtoaWmho6OD0NBQrrzyym5z/JxOJ+Hh4Vx55ZVoNBoEQaC1tRW73U5dXR0tLS2UlpayZcsWjh49itVqpa2tjZqaGmw2G3K5vNvFptvtpri4mNjYWJqbm4mIiOiyj1qtRq/XU1tbS3JyMgaDAY1Gg9VqRS6X4+fnh1qtpr6+nvj4eLG8WS6XM2nSJARBYOPGjQQEBNDS0oJer8fHxwebzYbZbGbEiBEcPHiQyspKDh48eEGFeSTj5DxTWlrKfffdh8Ph4Pbbb2fBggU9JqOGhIQwY8YMr+1DhgwRk6GKiopEKeN//OMfOJ1OysvLqaqqorCwkNmzZ3t5PAC2bt2KSqXik08+wel00q9fv25vUJlMxlNPPcUPP/wglgCPHDkSpVJJY2MjbW1tzJgxg1GjRnn1nfDw5JNPimq0AAcPHmTw4MHU1tYyaNAgrFYrP/74I+PHj+enn36SkmQlJCTOGnv27CEvLw+lUolOp2PWrFndzlvQqQU1fvx4MVlVJpNRVlZGfHw8vr6+qNVqGhoaUKlUqNVqlEolRqORsrIy3G43Wq3WK1RutVrRarUYjUYaGhqIiYkB6HZR6Ofnx5w5c9DpdEyZMgW5XE5kZCTV1dX4+/vT1tZGRkYGZrOZsrIyL30qvV7PVVddhdvtpq6ujtDQUHx8fKipqWHgwIFUVlZSXV0tGmitra0XlDCmZJycRwRB4N///jfV1dVkZ2fzzDPPeFXSnAq1Ws29996LVqslPT2dxsZGRo8ezbhx43C73QQEBJCens4333yD0+lk1qxZXp+32Wzs2LGD4cOHc+TIEZRKJZdcckmP5/Px8WHcuHFij53k5GRSU1MJDw8nICCA1NRUNm7cyGOPPYZcLkcQBC834z//+U+xRPmnn37i2muvJSoqiuXLlyMIAjt37iQzM5P29naKior6+GtKSEhInBqLxcK+ffuIjo7GarUyY8YMwsLCTvqZExeEERERxMTEkJycjFqtJiAgQAyhhIaGYjabiYiIEI2ffv36eS0K6+rqCAsLo6amBn9/f4xGI0FBQV7d5U/E47Xx5MKkpKRQUlJCY
2MjQUFBjB07lvnz5xMYGEh9fT0+Pj6iPERKSgrjx4/HZrMRHx8vdjlub28nMzOTmpoaTCaTaFSVl5f/0p/3jCEZJ+eR0tJSPvroIwIDA3nrrbd6bKbXEzKZjPj4eJRKJe+88w5vv/02fn5+YtfMN954A4B//vOfXHnllQwcONDr8263W6yBr6ysZMiQIcyePbvX5w8ODubaa68V+0lAp5rhfffdR3Z2Nh0dHV5CbKmpqbzyyiuoVCoaGhr45ptvePHFF0WD7PDhwxQUFBAdHU1FRUWffgsJCQmJ3rB//37CwsKIjIxk1KhR9OvXr0+f9/HxYcyYMURFRTF16lSxD47H0EhNTUWtViMIAoIgdBGbtNlsmEwm2tvbiYqKYufOnd1WCPVESkqKGC5SKpUoFAqSkpLEeX/MmDGUlpaK+48ePZqEhAQOHjxIv3790Gg0lJWVUVZWJoaHamtrSU9Pv6DE2STj5Dzy/vvvU1NTw/z588nIyPhFxwoKCiI7OxuFQkFWVhYPP/ww8+bNIz8/n5qaGq699lrR4+FBo9Hw4YcfIpPJCA0N5d577+1W0O1kLFq0iA8//NCrBM5gMPDWW2+h1+u7qNfOmDGDq666CoAVK1ZgMpmYO3cu0JmZvmjRIoxGo9gtWUJCQuJM4fGapKWlUVpa+otVqtPT05k2bRpBQUGkpaURGBjI9OnTKS8vx+l0Eh0d3UWUMioqSmz419TURFRUFHFxcb0+p1arZf78+UycONErTyUlJYW0tDR27twpVmlCZ/7J5MmTRfl6X19fXC4XCoUCtVqNRqPBbDZTXFz8i36LM03X5AKJc0JZWZnoNbn77ru7GA6/hMmTJzNp0iRkMhk1NTVMnjy523CNXC4nMTGR4OBgdu/efUrXZnfodDqxVPhE+vfvz4ABA6iurvZ6X6vV8ve//52NGzfS2NjI888/z5tvvsm3335Le3s7paWljB49mkmTJvX5WiQkJCROxoEDBwgNDaWsrIz09PRu1VxPl8mTJ2Oz2dDpdISEhBAQENBtTzS9Xs/o0aMpLi6mtbW120qeUxEeHt6t5EO/fv3Izc3F5XLhcDjEcFJERARjxoxh8+bNDB48mH379uHj44PFYiE4OFjswNwXz/nZRvKcnCdWr15NbW0tV111VZdwy5ngxDDLa6+9dtJcFoPBcFqGyclQKpWEhYV5hXU8JCQkMGbMGACKi4uprq72KnFWKpVn1FiTkJCQ8BQLREdHU1JS0ieV196gUqnQ6/W4XC46OjoICwsTk127Izk5mWHDhvWYiHs6eAyO9vZ2rx5q0OlZUSgUCIJAYGAgSqUSlUqFy+UShTIvpHn3wrmS3xAtLS28/fbbGAwGbrnlli4Dwm63U1pa2qXr5elw4403igPvXCKXyxk/fjxFRUVdbhKNRsOf//xnfHx8cLlc/Otf/+KWW24RXZT5+fmiqJGEhITEmaCwsBCZTIbJZCIlJaWL16S5udlLUO10UavVzJo164wbP70hKCiIkJAQzGYzjY2NXtuioqJITU2loKCA9PR0zGYzOp2O9vZ2wsPDkclkXrkq5xvJODkPrF69mtzcXGbNmtVt99+PPvqIzMxMnn322fNwdWeOq6++GqvVKqq/nsgll1zCuHHjAMjJyeGhhx4SG1Y1NTWxb9++s3593XVzlpCQ+HWya9cu+vXrR15eXpd51+l0smrVKsrKyli9enUXraa+otPpzktyqUKhYPDgwRgMhm7LgseOHYvNZhP76jQ1NaFQKCgpKcHf359jx46d9Wv0NC88FZJxch5YtWoVAHPmzOniNTEajbzxxhu0tbWdUXff+SA5OZkXXniBL774oouomlKp5JprrhFjrQcOHKCwsJAJEyYgk8nOeklbRUWFKAwnISHx66a9vZ3GxkbcbjfBwcFdklSPHj1KUVERdXV1mM3mC6pqpa94uhQrFAqv5oDQmasSHR1NTU0N/fr1IywsDB8fH4qLi0lOTsZsNp81GXuXy8X+/fvZtGlTr/aXEmLPMVarlcLCQiIiIrotH/vyyy/Jz89HqVR261W52JgxYwYjR47s9ma/+uqr+cc//iG6Ep1OJzabjeDgYFEq/0xiMpkoLi7m6NGjPPXUUxw9evSMn0NCQuLCw2g0otFoOH78OBkZGV7zkdvtZufOnajVajo6OkhLS0OtVp/Hq/1lyGQyRo4cSWlpaZeqR7lczogRIygrK6OiooLk5GQOHDiATCajurqa9vZ2rFZrj5orvcHpdOJ2u7FYLAiCQG1tLcePH6ehoYHS0lKvLvYnQzJOzjE1NTUUFhYycuTILkmoLpeLL7/8ErfbzZgxY8Sk0YsZuVzebc8I6OyYecUVV4h6LAC7d+/utXS9Z+CfmNPS3NxMcXExFouFxsZGOjo6yMvLw+12c/jwYcrLy7FaraJIkYSExK+f8vJyUaTs59WFDQ0N1NfX43a7kclkjBo16jxd5ZlDq9XSv3//brclJiZiMBgwmUwUFhYSEhIiGm+nwuVyYbFYyM/P98qJ9DQUtNvtOBwOXC4X7e3tKBQKAgMD8ff3p7y8nPj4eAoLC3v1HSTj5BxTUlJCS0sLY8eO7RLSKSkpET0GV111VZ81Ry42ZDIZt956K++//75oYPTGMGlpaeHjjz8mPz+/S9tvj/elu+OEhYUxaNAgDh48SHBwMH5+fpL3RELiN0BVVRUqlQq5XN6l+69HjdrPzw+5XN4nzZGLEb1ez8CBA9m1axdms5nJkyezY8cOjEbjSRdtx44dY926dbS1tXUpcoCuc7evr6+ofHv8+HFcLhfHjx//bXhOLrQWz71h+/btXq2tPbjdbl588UWqqqoIDAzsIjV/KlwuFzKZ7IIoBXO5XOTl5XHgwAHGjx9PbGxsl+/rITExkfj4+C5GQn5+PjabrVtrPicnh/vuu69P3g9fX18WL17Mpk2bWLRoEePHj8fhcJy01E9CQqIrVVVVaLXasxr6cLvdZ2wuczqd1NbWkpWVRU1NjVdIp7W1le3bt+Pr60tbWxvZ2dm9Pq/b7aa2thatVktgYGC38/q5QhAE6urqyMvLo7i4mIEDB5KUlERYWFi33yc+Pp6dO3cik8lobW0lPj6e/fv3I5PJaGpq6jass2/fPgRBwG63i+q3Hn7+7NFoNOh0OhoaGlAoFGRkZGC1WkWvzRdffHHK73RRGyfXXnstWVlZjB8/Hr1ej1KpJC0tTUyy9Eizny4ul4tVq1Yxa9asM3KjuN1ucnNzkclkXaTqc3Nz+d///gd0yrwnJyf36pjV1dWsW7eOjz/+mBtvvJHk5GS2b9/OnXfeKbrXQkJCzlqClyAItLe3iwN1165dvPLKK+Tl5VFbW0t4eDhDhw7l5ptv5vLLL+/y9/D19SUhIaGLcdLY2IjD4ejWONm1a1efwzLz588nJCSEtWvX8vjjjxMQEHBBdeCUkLhYKC8vp7q6WlQh9YRuPXNMcHAwISEhp53Q39LSwsaNG5k0aVKXxNXToa2tDUEQ0Gg0aLVar7lw9+7d2O12kpKSaGlp6dW8a7PZOHz4MIcOHUKt
VuPn54dCoaCuro4JEyZgs9lwOBykpqb+otyNnnA4HJhMJiorK8V5sKSkhOPHj9PR0UFGRoYo4bB582amTp3a5XkTFBSEQqHA7XbT0NAgphi4XC7MZnOXc3oMPKPRiE6nQ6VSYbVavQwUQRBQq9VERERQU1NDY2MjOp0OPz8/goOD8ff3Jz09vdflyhe1cfLKK69QWFhIR0cH+fn5FBYW4uPjw/HjxzGbzQQHB5OYmMh1112HTqfDx8eH0NBQ5HJ5ryzcXbt28eWXX/bZi9ETLpeLkpISIiMjvUTHoFPK3ZNZfdNNN50y/tfS0sKbb77Jhx9+SFNTEw8//DDDhg1j3rx5FBcXs379esrLy+no6ODqq69m8ODBQKdQ0GWXXdbnPj4eHA4HFRUVWK1WfvjhB/bu3esll9zc3ExHRwcPPvggRqORgoICbr75Zu688062bNnCX/7yF0JDQ8XjKRQKrrjiCtasWeM10BsaGqisrOw2btra2tqna/b39+fWW2/lk08+weFwXPRVUBIS55vKykoUCgUBAQFotVrKysrw9/cXu5QXFRUhl8uRy+X4+PgQEhKCTqcT5QI8236OIAjk5eUREBBwxjzj7e3tyGQyjh49yrBhw0TjxOFwkJubS1hYmLiQ8nR474mysjK+++47HA4HgiBgMBgwGAwcOHCAyMhIvvzyS/Gh7efnJ3YJViqVTJgwgaSkpD6rwULnnLd//37sdjuFhYVi4qoHmUzGgAEDqKyspKKiAovFgkKhoLGxkWXLljFlyhSvHkKhoaFi88Hy8nLRKJPL5VRXVzNgwACv85/oMfGoyvr6+tLc3IwgCPj4+KDVajEYDFRXV+NwOJDJZCQkJFBWVkZhYSHz58/v03e+qI2To0eP8tFHHxEcHExsbCzjxo0jJSVFTHhyu9388MMPrF+/nv3792M2m+no6CAoKIikpCScTieJiYmMGDGChIQEfHx8xJV9e3s7//jHP1i0aNEZcy/W1tZSWlrKJZdc4tUTwel0smXLFqDT23Oqfg8tLS388Y9/ZPfu3cyePZubbrqJgIAAbrnlFgoLCwkMDGTdunWiVf3666+Ln5XJZAwfPpwFCxZw66239tmyf/PNN3nqqadwOp3dWtgevvjiC3x8fGhsbCQ5OZl///vf3HHHHRiNRj744AMvD0pmZiYKhcJrMmpra2PdunXdGieeCa63zJ49m4yMDG6//Xaam5vZvn07EyZM6NMxJCQkOqmurqalpUX0TJeXl+Pr64vT6cTlcuHv709bW5uYDOnJUbDb7djtdpRKpbjKDggIIDg4WJyHKioqUKvVOByOHhPp+0pVVRV+fn7U1dV5ybM3NDTQ3t5OdHQ0x44dY/DgwT0uWgVB4Pjx46xbtw6r1YpCoSA5ORk/Pz927dqFy+WiurqahIQEcZHZ3NxMe3u7eIzPP/+csLAwpkyZQlJSUq+fKyaTiS+++IK6ujpcLheCICCTyUQjy9Mrp6ioCLfbjVKpxNfXV/RouVwu1qxZIwq0QacREhoaSl1dHRaLBbVaTVBQEI2NjRQWFjJ58mQvD5NMJkOj0YhJsM3Nzfj7+xMaGoparRYbHVZUVGC325HJZGi1WqKioigtLUWpVJKTk4O/vz8dHR29+t4XtXGyYMEClEoll19+OSUlJSxfvpz6+nqCgoKQy+UMGDAAvV7P2LFjue6664iPj6ehoQGtVktLSwutra3s3buXFStWUFdXR2BgoJgsVVlZSXR0tCgUdiaw2+1YLBYx8cqD2+0WQwwDBgw4qZz92rVreeaZZ6isrOS7774jNjaWb7/9lieeeEJ0l51M5EYQBPbu3cv+/fv55JNPePPNN3ulZGiz2XjjjTd4/vnnexUOObGr8NVXX83DDz/MSy+9xL333suiRYt4/PHHCQsLQyaTYTAY0Gq1XtnfgiD0mKyalZWFTCbrVfKsXq/noYceoqSkhNLSUmw2G7Nnz+byyy/vdWKWhITE/6ehoYH09HTa2tpwu93YbDY6OjoYPnw4jY2NGI1GIiMjMZlM1NfX43K5qKysRKfT0a9fP5xOJxqNBh8fH3F1X1lZKd7zycnJPVaanA42m00MYXh6zUDnfOx2u6mqqsLhcHTxFpy43/r16ykuLhZTByIiIjhy5AiHDx9Gr9cTExOD0WiktbWVAQMG4Ha7KS4upqGhAY1GQ0REBA6HA4fDwYoVK8jOzu5VJ+La2lq+/fZbampqxDwPnU4nKrsKgkBaWhr19fXY7XZ0Oh0KhQKXy0VVVZVocISFhfHll18ycuRIUlJS0Gq1+Pr6isdbt24dCoUClUpFe3s7HR0dXgtXpVJJaGio6ClxuVxiB+aamhrMZjMGg0E07mQyGZmZmbS0tGCz2QgPD+fo0aMcP3681zIRF7VxIggCN9xwA++8844oS1xcXCyWMe3Zswe73c6aNWv47rvvkMvlHDhwAJ1Ox6effsq0adOYP38+drud2tpaDh06RG5uLgCTJk1i7ty5XoP5TCCTybqs/D0eFegMQXQXdrBYLOzatYtbb72VyspKJk2axKuvvsqOHTsoKiry6kLZG1wuF7t37+a+++7jhRdeYMiQIT16URwOB4sWLeLdd989rQd6SUkJd999N5GRkXR0dPDGG2/www8/MHXqVCIiIhg0aBA6nQ6TyYRcLkelUmGz2cjNze02Ma4v+TPz5s1j0KBBvPHGG6Lqo8Vi6VVCloSERFc8oZdbb70VpVJJZWUl5eXlYlm/QqHAYrEQGhpKcHCw2A3Xbrcjl8u59NJLsdlsNDQ0UFtbS2trq+jl9fX1JSQkpNumdr8Eq9WKSqXyCqlUVlaKi6P29nYxBOPBZDLR0NBATk4OFRUVBAUFUV1djclkoqamhrS0NFpbW5HL5Rw/fpy4uDiMRiPbtm3rcu7jx48TExPDoEGDKCgoIC8vj8bGRtLT0wkPD0epVHaZf+vq6vj8889FL1VKSgp5eXk4nU7a29txu90IgiCqaXu8G57+OWlpaeJ+Op2OyMhINmzYwIYNG0hNTcVoNCKXy4mJiREVY+Pi4igtLaW9vf2UXnVPHx+PB72jo0N8dmk0GrKysli+fDkulwutVit6x3v7rLqojZP58+cza9Ys0VozGAwMHTpU3O7pbOuxON1uNxs3buR3v/sdX331lagjolariYuLIy4u7px0Zfx52MZms3UrpW61WiktLeW9995j69atHDlyRLz58/LyRO9ERkYGw4YN6/IQz8rK8srvaGtrY8eOHTidTvbt20dHRwf79+8X45FpaWmEhYUxYsQIhg8fjk6nw2az8dJLL/HZZ591a5hERESg1+uRy+UMHjyYgIAARo0addKcFqfTydq1a/nss8+YNm0aW7duxW63A505MYGBgdTW1vb4eY9L81Sek5CQEB544AFxklEqlRdlhZeExIWEJzzgCREEBASQkZEhbvc8NI1Go/gwqqmpwel0cvDgQSZOnIhWqyU2NpbY2FgcDodonHjKfc80dXV1xMbGei38bDab+ED3IAgClZWVFBQUcOTIEVpbW1Gr1YSFhWE0GkX
vgdPp5PDhw2J+iVqtFues8PBwr2M6HA6MRqOYR6dUKhkyZAg2m421a9diMplQKpX4+fkRFxeHWq3G6XRy6NAhzGYzAQEBpKWlcfDgQVwul6jH4pn/NBoNcrmcwMBAr3C50WgEICkpCavVSlVVFYIgEBAQgMlkIj4+Xgw7xcfHU1ZWhtls7vH3//mi0N/fXwxhCYKA0+kUPeAjR47EZDJRXV2NUqmkvLyciIgIMXH6V1+tU19f3yuZc8/DTC6XM2XKFCZPnsyePXtwOp2iJV1YWMiyZcswm81otVouvfRSEhISCA0N7ZU4TV/4+R/5+PHj4sPZ7XbT0dHB5s2befXVV9m5cydms1kciOPGjeMvf/kLAwcOFA0AhUJx0q7DJ3LLLbcAnd4Dj/dk+fLlFBQUsGXLFurr63nnnXfQ6XTI5XJR6e9EfHx8GDVqlOh5Cg8PRy6Xo9Vqez2xzJ07l6KiIvz8/PjTn/6ETqfjpZdeYseOHVRVVZ30s6mpqQQHB9PQ0NBlW0pKCqmpqaxatYr58+czcOBAMRzVW3E3CQmJnvE0lHO5XN3maHjmgBMXRj4+Prz33ns4nU7q6+uJiYnB7XZz8OBBmpqaxIet2+0mMDCQlJSU007a78v30Gq14txZVVXFTz/9RFFRET4+PgwaNAi32y0mziYkJBAfHy9el0ql6uLhkcvlovfCg9vtFoUfa2trsdvtHD16lPLycux2O+Hh4ZhMJoKCgujo6MDhcOB0OlGpVAQEBBAQEMChQ4fEhWlAQADDhw8XjcOwsDC0Wm2v5t+SkhJ+/PFHKisraW5uRq/XU1paSlxcHDqdDq1W26NnIy4ujry8PGQyGSqVCn9/f+rq6sS/m81mIyAgAJvNRmZmJitWrCA8PJyIiAicTicFBQVUVVVx5MiRXv19LmrjpKKigvXr13Pdddf1ulzWarVy9OjRLh0pV6xYQUREBCqVik2bNnHddddxySWXEB8fz9NPP31GrPkTk5hOZPv27aJXYs+ePUyZMoUDBw6IbtCxY8eyY8cOhg8fzhdffEFkZOQvvhbPDTl58mQmT56M2+1my5YtzJ07l5iYGJKSktiyZQs+Pj5iyKW6upqQkBD+85//MGPGjF8U8lKpVIwePZoXX3yRpUuXMnnyZBYuXMiDDz7InDlzTuo58fX17fHcDoeDffv2oVAouOSSSzAajbzzzjtehklsbCyTJ08mODiYV1999bS/g4TEb5GOjg4UCgUHDx4kNTVVTL48GU1NTWLSrOde9FT+OZ1O0ftaX19PY2MjTqeTjIwM/Pz8fvH1dudltdvtojfHZDIhCALff/89Pj4+DBkyhOrqanbt2kVAQADt7e1cfvnlXt6hvuCpWILOxRNAeno6JpMJk8nE3r17qampEY2F+Ph4cZvD4RC9E4MGDWLMmDGEh4eftp5KTEwMZrOZoKAgampqMBqNBAUFodVqsVgsJ/Usn+h1ksvlXkaMTCbDz8+PxsZGQkJCqKiooKamhvj4eOrr62lqagI6vTy9lffo0xP3ueeeY8SIEfj5+REWFsZVV11FQUGB1z5Wq5W77rpLzMC++uqrqaur89qnvLycyy67DB8fH8LCwli0aNFpuds9SaGjR4/m9ttv57///S/l5eU9Wn5ut5tPP/2U4uLiLsbMtGnTuOWWW7jllltYvHgxr776Kvfffz/Lly/n22+/7fO1dUdERATx8fFiXgt0Zr5/9dVX4muz2SzW3kOny/Spp55Co9Fw3XXXnRHDpDvkcjnjx49n+vTpXH/99fzvf/9j9+7d5OTksH//flavXo2fnx/jx4/n8ssvPyO5OPHx8chkMmw2G6tWreIPf/iDODlA5w3cV32WsrIy6urqiI+PZ9q0aXz//fdiPFWhUKBWqzGbzbS0tHD99dd7ffZcjl0Jid7S3bzrUTX1cC7HrifB87vvvuPtt99m8eLFbN++ncbGxm69k558Oc/97nnIqdVqWlpaGDNmDJdeeimZmZn4+fnhdrs5cuQI69at63MuXXdER0djs9kwmUxi+LygoIC2tjYcDofoCerfvz9DhgyhtLRUTPp0Op0EBASc0QRdD3q9XuyxFhcXR3p6Otdffz2hoaEMGjSIefPmMXToUMLDw9FqtUyePJmoqKhfJPSmVCrx8fGhublZrDayWq1YrVaUSiUOhwNfX99TGoUeJW5P92WlUkm/fv1oaWkRVbhlMhnt7e00NDTg7+/PjBkzuPvuu3stzdEnz8lPP/3EXXfdxYgRI3A6nTz++ONMmzaNvLw8MZno/vvv5/vvv2f58uX4+/tz9913M3fuXDFJyOVycdlllxEREcH27dupqalh4cKFqFQqnn322b5cjtioqaSkhJKSEv7zn/8QEhJCdnY2M2bMIDg4mJaWFoKDgzEYDGzdupWXX34Zi8VC//79vf7Iw4cPF/9fLpczf/58BEHg5ZdfPmN9WNRqNT4+PnzxxRcsWrSIlpYWbrzxxi7Zy55y3wceeAA/Pz/RkEpMTDzp8T1xP+hcqbS1tdHe3i5aqxEREV0aQZ2IXC7HYDCQlJSEQqEgKSlJ3BYZGUlgYGCPN4bVavUSBfJ8j+jo6B7bh6elpXmtatrb2zl69ChDhw6lsLCQRx99tNvP6fV6+vXrR3V1dY/fZcKECQiCwPPPP49KpSI+Pp4//OEPzJw5E7fbzeOPP85VV13l9ZlzOXYlJHpLd/PunDlzvPY5l2NXo9EgCAJRUVE0NzfT2NjI2rVr2bBhA8HBweh0OiwWC76+vmi1WmpqasTqErfbLQqryWQyrr32WjFsHhoayhVXXIHVamXlypWoVKozEorVaDSivlVpaSlut5uVK1ciCAJ6vZ60tDT2799PcXExhYWFREZG4u/vL2pmJSQk9KhN4nK5sNls1NfXA52VTK2trWICrkKhIDo6msTExB71lVQqFTExMWKI50SZCYvFIs5zP58LHQ4HlZWVHD9+3GveVSgUxMTEEBkZ2SWp1ZObUl9fj6+vL4MGDWLr1q34+vri4+OD3W7nkksu6TYZNjQ0FKVSidvtxu12U11d7ZVEfOzYMXx8fDCbzdhsNtLS0qisrGTy5MkMHToUvV6PIAgnlaA4kT4ZJ6tXr/Z6/dFHHxEWFkZOTg4TJkygtbWVDz/8kE8//ZTJkycDsHjxYgYMGMDOnTsZPXo0P/74I3l5eaxbt47w8HCGDBnC3//+dx555BGeeuqpPim6rl27lqVLl7J161aKi4tpa2ujoaGBb775hm+++UbMmVAoFMjlcjF0EhYWxsyZM095fJlMxuWXX96HX6h3lJWVsXr1apYuXcpPP/3U5Zy/+93vePPNN8Wqnvb29h7FgcxmM3l5eezcuZO8vDz27NmD2+2mrq6O5uZmMXlKJpMRFxfHDTfcwJ///OdujRS3201zc7OYSHUiVqu12xLiyspKPvroI7777juxwZ4HjUZDbGwsY8eO5e6772bAgAFeN5hGo+lywwmCQHNzs7ia6A5P0mxPyGQy5s6dy4EDBygpKeHKK6/kk08+8fL2PPjgg0yZMkV8fa7Hro
REb+lp3vVwrsfuwIEDOXr0qBgO0ev1Ygnq+PHjEQQBf39/bDYbTqdTDGvk5+eTkpLi9aD/eT6fTCZDp9Nx9dVXIwjCGZODl8lkhIaGsnHjRpqamsQKovj4eA4fPozL5cLHx4fLLrtMXDQdPXqUwMBA0UDySB148jQOHz5MVVUVbW1t4gP3xGR9z9wmCAKhoaFMnz69WwVam81GRUUFOp2uiwS+xWIRk0w9ybCCIHDw4EG2bt1KR0eHKMlgNBoJDg6msbGRvXv3olQqGT58OGPGjPGa+zzH97Q8EQRB9IKo1eoevUS+vr5e38lgMJCdnc3333+PIAg4HA5iY2OpqakhMjKS0tJSL2+/IAhs27atyzOvJ35RzolHqdPzEM3JycHhcHDppZeK+/Tv35+4uDh27NjB6NGj2bFjB4MGDfJKJJo+fTp33nknR44c8aq28WCz2cQyUEB8SKampvLaa69htVppbGxk48aNbN26laKiImpqasjKyqKpqYnm5mZR+0Ov1/PII48wceLEX/LVfxEWi4UFCxZ0qdDxGCbvvvuul/Gg0WjQ6/VinsgLL7zAsWPHGD58OMuWLWPXrl3dVvv8nKNHj/Lkk0+ybt06XnjhBXHweqiurmbfvn08+OCDXT6rUqnQarXU1tZSWFhIZWUlx44d49VXX6WyspLAwEDsdjuRkZGUlZURGhrK22+/zV133cU777zDV199xRNPPMGtt9560gRjT87IggULTvpdTlbmFhERQVZWFm+88QZRUVG8+uqrXjdnY2Njl+94rseuhMTp8nOF5HM9dsePH8/EiRNpaGigpqaGgoICcSXuqWBRKpUoFAoUCgVWqxWTycTgwYOZOnVqr0K1Z7pip729HZvNRlNTk6hJYrVaKSwsxOFw4OfnxzXXXOO1CNRoNCQlJVFVVYXJZGLTpk0EBARgNBopLy8nKSmJUaNGsXPnToYOHYparRZl2g8ePCh6qg8fPixWKBYWFqLVapk4caL4HYuLi4mJicHhcHQxxjzzlkajYd26dQQGBtLY2Ijb7SYqKoqjR4/idDppbm7Gx8eHuro60XMyceJEtm7dyooVK5g+ffpJF3RNTU1kZmbS3t7eY3GF5+/pCbV5FsEymUxclCYmJortRSZNmuRlmBw+fJhNmzb12uA8bePE7XZz3333MXbsWDFRqLa2VlT9O5Hw8HAxwdEjE/zz7Z5t3fHcc8/xt7/9rcdr0Wq1xMTEsGDBAhYsWCBmO3syjz3/ALGq5HzTnTGRmZnJm2++2a1Xw9/fX/ydx4wZg8vl4ttvv0WlUjFu3DgEQaC4uJiKiopu47Se7Oq0tDSCgoIoKiryuhHdbjcvvfQSISEhDBo0iNbWVlEDADrLcvv160d+fr4oZfz2229TUFBAeHg4er1elE6GzrLuSZMmcc0114hiat9++y1ZWVmi6FtkZCQGg6Fb0bhT6RxkZ2fz8ccfd7ttyJAhhISEcOTIEaZMmUJ0dLS4rby8XPSqnMj5GrsSEn3BM++OHj2anTt3Audn7Or1evR6PYmJiYwaNQqbzYbRaMRsNhMREYHFYhFzGaAzpB0bG3ta0u1nAk8Pmbi4OCIiIqisrBS3yWQyZs2a1a13uq2tDYPBIErvexTGS0pKyMvL48iRI5jNZlER98Tz5efn43K5sFqtaDQaVCqVmEzs8cZ4Gg/a7XauuuoqcnNzkcvlpKWliSGhLVu2iA38PNVDw4YNY/fu3ajValFMLTAwkLKyMoYPH87x48f59NNPEQSBlJQU9u3bJ3qKQ0NDRS+5pzBEEAT69+9PeXl5j8aDwWDA39+f+vp6FAoFHR0d5OTkIAgCcrkcmUxGQEAAZrMZmUxGWlqa+Nn9+/ezZcsW8ffoDac9Uu666y5yc3PZunXr6R6i1zz22GM88MAD4uu2traT9kBQqVSixalUKs/bDfFzrFarKN3rESPKzs7mb3/7GzKZjD//+c8nlWb3WNoTJkwQcypOxGg0kpOT0207a0+tfHBwMEqlssvqJTc3l6VLl/KXv/wFjUbD1q1bKS0t5bbbbhMHnlwup6mpieLiYmbNmsWQIUP45ptv2L9/PwAJCQmUlpYSHx/P73//e4KCgnjjjTe8znPieUNDQ7sYJx6FyRMHdnfExMR0q1sil8v53e9+R0tLC3l5efzhD38Qz+l0OnniiSfIyck56bHPJH0duxISJ8Mz765atYr09PSzeq7ejl2FQoGPj49YkQKcNLftXOOZD2NjYzl27JhomOh0OpxOJ6GhoV59Z07E8+BVKBRkZ2d7KaN6qK2tJS8vTyxiOBFfX19iY2PF3LugoKAujQfdbjd+fn4EBQWxfv16nE4nERER4r6CIIhz69y5czly5Ag7duzAYrHg4+NDVVWV2NOoX79+jB07luHDh3sZASdWpwYFBSEIgpf4XWNjY5e/YXcEBATQ0NCAIAhERkbidDpF9drY2FhsNhuBgYFERESISbWNjY2sX79elMTobQXWaT217777blauXMnmzZu9Ws5HRERgt9tpaWnxsuLr6urEJJ+IiAh2797tdTxPVvmJiUAnotFozrjWyPnCMzCjoqL46KOP2L59O9B5M0+dOrXHz3RXHfNzAyM4OJhp06b1+ZocDgdPPfUUYWFh/P73v0cul3e5FpvNJia0rVy5kpkzZxIREcEf//hHcR+n04nD4eiixNgTer2ecePGUVZWBnR6wMLCwrBYLKfsnzN27FjmzZvHsmXLvN5XKpUMHjyYoqIi7Ha7lzT/d99916O3RRq7Ehc6J867njYbII3dU+GZJ+12Ow6Hg0suuURsZWGz2ejXr1+Pei0/13JRKBRe+i3Q6YHKzMzs83XV1tZSUFCAw+Fg+vTpBAUFMXfuXFHeHzoNK5vNRk1NDdXV1VgsFgYPHszAgQNFY6i1tZXW1lax141CoUCj0XSRy/AQExMjykN4KpM8ia2nqsIcNmwYVVVVmM1mampqiI2NFQ240NBQampqCA0N9epv9+OPP3olwXpKo09FnwJ7giBw991389VXX7Fhw4Yu1SPDhw9HpVKxfv168b2CggLKy8tFNdYxY8Zw+PBhMbsZOhNbDQbDWV8JnG9UKpWYeDZv3jzCwsL4+uuvxXbePQ0MlUrF0KFDvX6zM0ljYyO7du0SO4d2R2lpqVg2vnbt2m4HmFKpRKfT9dpTJZfLvdRyo6KicDgcvXL7BQYG8vvf/76LgeZpQrV9+3ZSUlJEoaLGxkb++Mc/9lh5JY1diQsVad79ZahUKrE81tfXV+wH09raSlNTU48GWHh4uNgb5mxQXl4uJhN7cug8hoWHkpISsZmixWLh+PHj4n6eHjsRERFiv5/e5HMYDAb0er3o8YLOufibb745ZXVU//79iYmJEfNMEhISxDlYoVBgNBqx2+2i06KgoICSkhJRpV0mk53SO+OhT8bJXXfdxdKlS/n000/x8/OjtrZW7KcAna6j2267jQceeICNGzeSk5PDLbfcwpgxYxg9ejTQqSeSnp7OggULOHjwIGvWrOH//u//uOuuu34VVvrJUCgUKJVK5
HI548aNY9++faxcuRLoTCQ6mSs0KyuLNWvWnJXrqq2tpampiQULFvRoIEVHRzNs2DCgs9po1apVZ+TcJ670pk+fLibPeXoNnYzo6OguLsLo6Gji4uLYtm2blxT2woULu1WU9SCNXYkLle7m3RM1TKSxe3I8De2ioqIIDQ3l4MGDQOec61nxd4dHgsFms52VJqH19fW0tbXh6+tLVFRUt/skJiaKhkBgYCAHDx78xbpKHr0nT+6lj48PISEhaDSaLto43REWFuZVNeTxMPn7+1NeXk5ZWZloJOXk5OB0Or2qmLpLO+iOPhkn77zzDq2trVxyySVERkaK/z7//HNxn9dee43Zs2dz9dVXM2HCBCIiIvjyyy+9fpiVK1eiUCgYM2YMN954IwsXLuTpp5/uy6Vc1MjlcsLDw3n88cdFD8SIESNOKkE/dOhQNm7ceMarPQRB4LvvviM8PJwZM2b0uJ/BYOCJJ55Aq9XicDh45plnui05Pl1CQkL405/+xLZt27DZbF1EprojLCysS7MuvV7fbS5Kb4wpaexKXIh0N+/+PEdCGrunprW1lfb2dnJychg5ciRWqxWdTudVln0iMpkMf39/9Hp9r9qk9AWz2UxlZSVms5lBgwb16PFITk5mwIABHDp0iEGDBlFSUtJjt/a+otVqKS8vF/NnBg8eLOaTnAxPzopMJqOjo0P8f8/C1vN5u92O0WgUt3uMkxO7z5+MPod1uvt38803e33ht956C6PRSEdHB19++WWXmGZ8fDw//PADZrOZhoYGXn755QsmafVc0d7e7tVj4MSKku6Ii4sjPDyce++9t1dehd5SUFDA+++/z6uvvtqjroiH8ePHixnfR48e5d133/3FCo6RkZFkZmby8ccfk56eLrpQ//vf/57SwpbL5V3GzfDhwzGZTKIKr8Ph4O233+5xAjoRaexKXIh0N+f+vJxYGru9o76+HofDgVKppKWlxcuD0B0pKSmUlJTw7bffsn379l6v+k/F3r17KSsrIyEhgaysrJPum52djdPppKOjg/j4eLZu3drrB3xPBAYG4na7SUtLY9SoUTidTvLy8qivrxcrLnvC4402GAxMmzZN7HrscDjEXnRtbW0sXbqU5uZmscrI02G+t8nSZ779o8Qp8Qj8nCzM8HMUCgUvvfQSTU1NjBs3jmeffbbXiUU9YTKZePTRRxkyZEivEmnVajX3338/Wq0Wt9vN008/zV//+lexb8LpMHXqVLZs2cKMGTOQy+Wi7kpxcfEpb0B/f38GDx7s9Z5cLufw4cNUVFQgCAKffPIJ999//1nL15GQkLg4cLvdlJaWolAo2Lx5c6/mreDgYPHhvXnzZpYsWSJqi5wulZWV7Nixg/DwcEaPHn1KA9GTU5KXl0d8fDxqtZolS5aQm5vb67LcnzN79mymTZvGFVdcQVhYGHq9XhSm88j590R0dLQounfi71BYWEhqaiput5uvvvpK9PL/3HPSW8V1yTg5h8hkMoKDg0lPT6e2trbPAzwkJISPP/6Yu+66i88++4ypU6fy3//+97RuFLfbzV/+8hcOHjzIK6+80uvyrvHjxzNp0iT8/Py4++67+eKLLxg3bhx/+9vfTtsAOPHcHg+HxWIR23n3RE9VTE1NTbjdbvbu3ctTTz3VbYmfhITEbwNPTk1hYaH4EPZ0Re4NycnJzJs3j8DAQCwWC3a7nQ0bNpxWWLu1tZVvv/0WHx8fsrKySEhIOOVnZDIZ2dnZuN1u9uzZQ1VVFR0dHXz11Vd8/fXXHDt2rM/XodPpxColpVKJWq0WvRunWhR6QlAnGhqeXJLS0lKUSiXt7e20trZ2GyKSjJMLEIVCwXXXXcdNN9102rLM/v7+PPbYY2zfvp3XXnuN1NTU01JTbGpq4vPPP+fyyy/vsca/O9RqNTNmzMBqtTJt2jTWrl3L/PnzaWxspKqqqs/X0RMBAQG9CsV4StZOxOOmbW5uPuOxYgkJiYuL0NBQBgwYQFxcnFhF0tck4MTERBYuXMjMmTNpbGxErVafNEewJ4qKirBarTidTrHBaW8IDw/HYDAQHh5OYmIi48ePx2Aw4Ovr+4s81ycyduxYgoKCxArHntBoNGi1WgRB8MqBVCgU1NfXExISctLnW29Dib+tgOMFwA033ADA66+/7vX+0aNHvfoxnAo/Pz/GjRt32texbds2TCbTaR1j1KhRCILA66+/zhdffHFWkupOVPU9Gd2VQf7aqw8kJCR6j1wuZ+zYsdjtdsrLy2lsbESj0aBQKLBYLJhMpl7lQeh0OtLS0k4pENkTTqeTsrIytFotPj4+J23B0d13iIqKQi6XU1NTQ3R0NHfeeSdut/uM9fQymUw4HI5TejY8Ym3t7e2iYSSTyVAoFNjtdlwuFyqVCrvd7tWLx7PfgAEDenU9kufkHCOXy5HL5QwbNszL4+FRjj0XFBYWct9995GWlsb06dP7/PmUlBSGDx/Ojz/+yIcffnhGr83T+dhkMnUray8hISHRV/R6PUFBQaIMfH19vVii+0tLc3vLnj17aGlpQS6XiyXefWHAgAE0NjYyatQo1q5diyAIaLXaM9KHyNfXl927d9Pe3v6Lcxmrqqqw2Wxdck08rwcNGtSr40jGyXkiJibGy1pvbGw8I+3BT4XJZOLuu+8mJCSEjz76qEcVwZMRHBzMww8/jEwm47XXXhNbep8J/Pz8xOzuxsbGUwogdZdzEhMT0+scGgkJid8OHo0Oj2Kp0+k8JzlpZWVl7N69m7a2Nvr379+nULqH1NRUtFotNpsNtVrN4cOHz9j1aTQaHA4HLpeLqqoqysvLT2q0eVqgnOjd9vy2nqqcn1fpeF73NgwlGSfnidDQUK8H6LkwTKBT7yM3N5f33nuv1xZsd8ycOZPRo0dTXl7Ohg0bztj1VVdX43A4EASBL7/8kpkzZ7Ju3boe9+9OHyY5OblP8VwJCYnfBh6Njra2Nq/S7LOJIAhs2rSJ1tZWMjMzvToS9wW5XE52djZ79uwhLS2NQ4cOnbFrb21tJSAgALVajcPhICcnhw0bNvR4/Li4OGQymahYC53qsZ4+bCd6Sn7+urc6LZJxcp7w8/Pj2muvFV8XFBScsoTrl+JwOPjPf/5D//79u5Tg9hWbzYZSqUQQhDMmCgSI1np4eDiPPPIIWq2WN954o8ebRKVSdSth//MOrRISEhJJSUkEBASgVCpJSUlBEIQzmsjfHR5FX6VSSVhYGC0tLadtVFgsFlpaWigpKaG5ufkX60x5cLlcjB49Gn9/f+Lj48nPz2f37t09PpMUCkUXz4lOp+uVcXJin52TISXEnieqq6vJzMxEp9OJ7cXPduzT5XJRXl5ORUUFl19+OQEBAUyYMIHs7GxSUlJ6naDlcrnYsmULu3btOuPXuHfvXgCuvvpqDAYDWq2WoqIiTCaTFKqRkJD4RRw7dgyj0YhGoyEtLY3CwsKzHtYxmUyo1Wr8/f353//+Jy6eEhISGDBgALGxsb2qYGlvb2fv3r2o1WrKy8vPSK6J5/o6OjpwuVy43W6Sk5PZtm0bWq0Wo9HY
o7z/2UYyTs4TxcXFvPLKK6jVaiwWC2azmZKSkh57LJwpPOI5nj49n3/+OVqtlmHDhnH11VfTr18/IiMjiY2Npba2tttS3HXr1vHBBx+IaolnKltcEASqq6uRyWRkZWXhcrno6OigsrKS1tZWyTiRkJD4RVRWVhIeHk5jYyO5ubkolcqzLtAol8tpaWkhNjZWDJ90dHSwb98+cnJyCAwMxN/fn8DAQMLCwjAajd0KdDY0NNDe3k5wcDAWi+W05Sh+jt1ux+12097eLub8aTQagoKCeswP6W1V6S9BMk7OE0lJSVRXV4sy1J622GcTrVbLTTfdxKJFi3C5XMhkMrRaLRaLhe3bt7N9+3bg/5eKWSyWU1YRRUVFMXfu3DNyfe3t7ezfvx9/f3+GDBmC1Wo9ZbJtREQEMTExFBYWAnDo0KGz1kVUQkLi4iY4OJi8vDz8/f1RKBS4XK4z2iOsO2JiYoiKiqKgoIDg4GCam5sRBIGEhASxtNloNFJSUoLb7UYmk3XrFZHJZKSnp9PS0oLRaCQrK+uMtB+orq4mICCA2tpakpKSaGtrQ6fT9dgEFjrnXU+iK3Q+v/qieN4bpJyT80R0dDRjx471es9jHJxNbr/9dmbPni0O6u5in2azmcbGxlMaJhqNhpdffpmMjIwzcm2tra10dHSQlpZGVFQUTqeT5uZmYmJieswh0el0Xi24jUZjrxUIJSQkflukpqbicrnQarXU1NQQFBSE0Wg8bRn43qDRaJgxYwZqtRqj0YhWq8XX15eOjg4GDx5MSkoKiYmJBAYGAp0ClImJiV7//Pz8MBgMtLS0iEbEhAkTzsj1tbW1ib9HXFyc6JXxeGm6w9fX1yunxOVynfHfUDJOzhNyuZxp06YRFRUlShgXFBSc9bwTvV4vtl8fNWrUaR3Dk9j13HPPcc0115yxa1uxYgUNDQ2EhIR4rQhCQkK8DJCT0draesYUEyUkJH5dBAcHEx0dTUdHBx0dHVitVqxWa5dGimeauLg4rrnmGmJiYpDJZKKhsWfPHoqLiyktLaWlpQWFQkFbWxulpaVe/1pbW2lubqa6upro6GiuuuqqMxLWcblcHDhwgMzMTJxOpzjPemTsTyVOJ5fL0ev1yGSyX6yP8nOksM55JDIyEqvVKg6yoqIi7Hb7We8UqtfrmT9/PjNnzqS4uJjdu3fjcDg4fvw4BQUFJ/1sRkYG2dnZDB48mNjY2DMae/Rkhk+YMKHXyV5KpZL+/ftz4MABoFOW/2xXPUlISFycyGQysVxWoVCg1WpFwcfw8PCzeu64uDhuuOEGSktLKSgowGKx4Ovri81mw2w2n3QuNRgMREREkJCQwJAhQ3q9WDsVnp44ra2tqFQqgoKCeqVbFRAQgFarxel0iuGdMx3WkYyT80hGRgZ2u12MedbX11NeXk7//v3Pyfn1ej2ZmZlkZmaek/P1Bh8fH6ZMmdLr/eVyudgv40R601BLQkLit0d4eDgdHR2i1olcLqeqquqczLtyuZykpCSSkpLE9zwFESdDr9ef1bYcx44dIzExsdfeGF9fX1QqFVarlfb2dtHQ02g0XobWibL1nte99bBIYZ3zSHR0NOPHjxdf9yYB9deK2+3m6NGjxMXFnXbvihM5VwaehITExUVqairNzc3I5XJRm+N8zrs6nY7g4OCT/jtbhoknnNXY2EhKSsovOpaPjw86nU5SiP01oNVq+fe//811112HQqHA6XSyf//+831Z54Xdu3ezceNG5s2b10Vvpbi4uNcZ9RqNBo1GQ1lZ2dm4TAkJiYucmJgYZsyYgc1mE5NTa2trz5lK94XE7t27CQgIQKPReHmblUolISEh1NbWdvs5t9uNIAioVCqxqkcQBLHZ38lE2HrbMkUyTs4zMTExfPDBB0yYMAFBEM56WdvJMJlM1NfXd1vtcjYTTZ1OJy+++CIOh4M5c+Z02d7e3t5jJnhRUZHXYA8ODiYsLOysaxdISEhcvGRkZHDttddSXV1NRkaGqNl0rrHZbBQVFbF3794u12AymcjNzeXAgQM4HI4zfu76+nqKiorw8/MT+/Z4OFFmojt++OEHmpubCQ4OZsqUKbjdbgICAnplnPQ2X0bKObkA8PHx4ZprrmHjxo3n/Nx2u52SkhI+//xzvvzyS+rr61m4cCHjx48nJiaG+vp6vv32W9auXYvdbueOO+7g2muvPaM5Hd988w2rV69m4cKFXvkvCoUCnU5He3t7j9olmzdvJi0tTbwB1Go1Go1G6mgsISFxUjwSBedaesAjmV9QUMCRI0dE3ZPNmzcTGRmJn58fHR0dlJeX09HRgUwmY8uWLYwaNYphw4adkYIJt9vNhg0bSExMpLKykqlTp4rbVCoVLpcLpVLZrXquzWYTw2BtbW3id5LJZDidTrHbMyAWNpz4+ue90HpCMk4uEEaOHIlOpztnK363283hw4dZtGiR2C3T49Z88cUXeeWVV1AoFF1aij/66KO8//77PP3008ybNw+1Wi0OyNMpbWtvb+f5558nODiYxx9/3OsYSqVSFAfqaeVQXl7upRxrtVqxWCyMHTuWr7/+us/XIyEh8dtAoVAQHR1NfX09giCIYZ6zidlsZu3ateTl5eFwOAgKCsLPzw+VSoXNZqOyslLsOuxyufD398fpdGKxWNi7dy+FhYXMmDGDkJAQmpubcbvdPWqRnAxPbx673c6wYcNEjRXozIHxLAa7690jCAJhYWFs3rwZf39/NBoNMpkMjUaDv78/TU1NKBQKBEEQxT5PfF1RUdGra5TCOhcIqampREdHs2bNGtrb28/quRwOB2+//TaTJ09m7dq1tLa2dom3ulwu7HZ7F90VmUxGWVkZt9xyC/fddx9ms5klS5awaNGi09Jo+eabbzh06BBPPvkkcXFxXtt0Oh0JCQk4nc4eM7yPHj3qdd7a2lqqqqrIzs4+Y7L6EhISv06SkpIwm83nJE+toaGB5cuXi3mFQUFBhIaGis0Iw8PDiY2NJTMzk0mTJhEWFkZUVBQqlQqlUklwcDDl5eV8+umn1NTUsGnTJr7++us+pwK43W62bduGn58fbrebYcOGeW0PDAzEbDajUqm6TRS2WCz4+PiIXpGwsDCUSiV1dXWivIQnJ8XTCPDE1yEhIb26TslzcoHg5+fHoEGD2LBhA0aj8az0kREEgT179vDmm2/yxRdfeIVKoqOjqa2txdfXl5SUFOLi4qipqRFdi+3t7WLYZMCAARw4cID33nsPf39/Ro8ezYcffsh1113HiBEjen09TqeTpUuXEhQUxGWXXdbjfm63u8fmXJ7mXZ74qCcpKyYmBn9//zNeey8hIfHrITIykvb2djFP7XQqBZ1OJ3a7vdtcCkEQ6OjoYPPmzRw5ckQM0wwZMoQjR45w9OhR1Gq1OBfL5XIEQfCSsY+JiaGlpYXDhw+j1+txOp0sX74cnU5HW1sb27dvZ/bs2b2+3vr6evEZM3jw4B5l6lUqVbclzj4+PjgcDjIyMmhtbRXzAV0uV68Mj96G0STPyQXEmDFjaGtr49ChQ6f1+S1btvDqq6928YK
YTCaWLVvGXXfdxbRp0/j444+75HA0NTXhdrvx9/cnLS2N7OxsnnjiCbZt28a2bdu4+eabUSqV2Gw2UfDM7Xbz8ccfM2zYMN544w0SExP7dL1ms5n8/HwsFgv5+fk9el4cDkePOSQxMTEYDAZ8fX3FfZctW0ZMTMxJDR4JCQkJPz8/dDodOp2O48ePn9Yxtm/fztq1azGZTOJ7nvDFkiVLePvtt8nJycHpdBISEsKUKVPQ6XR0dHQQGhrKnDlzCAkJQavVkpqaSkxMDGlpaQwaNAhBEGhoaECr1aJSqUTRNqPRSEJCAjExMX2WTWhoaMDX1xeNRkNubm6PXmlfX1+v7+RBo9Fgt9txOBzY7Xb27NkDdGqlJCcnn7Lsubfhf8k4uYDIyspCpVJRWVl5Wp8/ePAgr7/+uvggb2ho4P3332fOnDnceOONvPPOO93KNMtkMqKiopg6dSpZWVlYrVaOHz/Ohg0buPnmm4GepfWrq6v5/vvvuemmm3rtrvs5ra2tXH755dxzzz1s374dQRAwmUwUFxcDnauPngyXfv36cfz4cS/XZm5uLjKZzEtDRkJCQuLnKJVKIiIiaGtrw2KxdJtjcSry8/M5dOgQx44dAzrnyg8//JD//ve/GI1GoqKiCA0NFb3hW7duZceOHUBniOSHH34Q+9jExMSQnJxM//79cblcGAwGlEolHR0dpKeni3O1QqHg+PHjzJ0797T0SWpra3G73VRWVvLBBx/w3XffYbPZqKioYMeOHWIIpicvh1qtJigoyCtp1hMCCg4O7rZKx+MJ6q1mixTWuYBISkpCr9ezefNm/vjHP/Zawt1DaGgoLpdLvMEefvhhPvroI699FAoFGo1GlC2WyWTMmzePf/3rX4SEhHhZtS6Xi6qqKqKiohg1ahSRkZFUVlaya9cucR9BEPj666/5/e9/3+csck8LcOj0orz77rt8+umnvPfee2RlZfGvf/3rlHksAwYMICoqyus9T3xz0qRJBAYGSpU7EhISPeIxTlpaWmhvb++xyWhPaDQaMYG1tbWVlStXEhgYSHJysljk4Ek+hf+vmqrX65kyZQqRkZFifpxKpRIFKceNG8fWrVtpbGykoaGBgoICcX5PSUmhrKyMxsZGIiMj+3S9nuaoLpcLtVpNR0cHBw8exGKxEBwcTGtr6ykVa4OCgrr18Mvlcvr3709VVRXgrQzrKSPureCdZJxcQMTGxjJz5kw2b95MQ0NDn3s9ZGdn8+yzzxIUFAQgWuoGg4GxY8cyYcIE0tLSGDhwIHa7nZycHGQyGVdccUW3N6RCoRCTVO+66y52795NU1OTaAx0dHRw6NAhduzYwebNm5k8eXKfrvfAgQNdPDltbW289957jBgxguXLl58yOTg8PJyHHnpIbBoInSsZo9FIQEAAer1eMk4kJCR6JCMjg927d6PRaCguLmb48OF9+vzw4cPZt28f/fr1E6tSBEHg+PHjuN1uDAYD0dHRREVF4XK5KCsrQyaTMW3aNOLj47s95siRIwFITk4Whc48PcPa2tpwOp2Eh4ezf//+PhkngiCI56+trUWj0aDVapHJZOTn5zNgwADa2tpQKpWUlpb2eJz09HTKy8vJycmhvb0dg8GA0WikqqoKPz8/0QA7sZQ4ODhYDAX1Bsk4uYCQy+WMGzeO5cuXU1VV1WfjJDY2lptuugno9ETs37+f6dOn89xzzzF48OAusb6MjIxeHzs5OZnk5GQA7r77bqDTs9LU1MTevXu9ekX0BpvNxrJly7p1G5pMJkJDQ4mMjMThcHQb9zyRwMBAL6+NzWbD5XIRHBxMRkZGr0vXJCQkfnv4+fnh6+uLw+Ggurq6z8bJoEGDGDBgAEqlkpKSEqKjo2lqamL48OEMHTpUDH+cDsOHDyczMxO73S4+1D3hl4qKCmJjY/t0vIaGBqqrq8XCAYPBQGNjo2hEhISEUFNTg1arxd/fv8cmqmq1mpiYGHJycnC5XKJR5nA4SEpKQqlUepUS+/r6olAoaGpq6n1T1z59M4mzzqWXXoqfnx95eXldSrz6glwu5x//+Ieon3I2UCgUhIWFMWvWrD5/trS0lNWrV3e77ejRoxQXFyOXy39Rh2GZTHbWOzxLSEhc3CgUCtLS0ti0aRM+Pj5iCKIveOYZrVZLSEgI48aN67PhcLJjK5VKr2qg8PBwsrKy+nysw4cPY7PZxJySn4umFRcXY7PZSE9Pp7Cw0Ev/pLf83PgwGAwoFArq6ur69LtKCbEXGNHR0SQmJrJs2bJeu7+6Q6vVMnHixDNumHis41/KW2+91aNHRKFQ9DnueyIWi6VXbb8lJCQkAOLi4vDx8aGlpaXHfjK9wVNYcKYME+j0ULe3t/9igU6TySRWWkKn51yr1WIwGMQwjK+vL2azGb1e3+teQzKZjICAAGQyWZcWJ55KSk+jxb4YJ9Ky8gJDp9MxYsQIli5dSmFhYZ9CL6eLp3NkbW0tBw4cEOvW5XI5gwcPRqvVYjQa2bx5MwcPHiQ0NJRnnnmmz6sLD83NzaxatarH7RaLhdLSUgYOHMjevXtPebyWlhav3jsOh+OsC9lJSEj8evAIiQUFBZGbm9vnJNPe4slH8XTndTgclJeXU1JSIi5G5XI58fHxqFQqTCYTx48fF4Xibr755i6NUXvL8ePHxXkxMTGRgoICbDYbERERJCcnc/z4caxWK1qttlcGWnt7uxiy8fX1RSaTdenF4+fnJ1ZSeqp3eqtzIhknFyBXXXUVH374IWvWrDkrxokgCKxYsYKioiJsNhvbt2/HbDZz9OhRURLZg5+fHwqFAofDIWZZP/LII7/o/KtXrz6ppoDdbqexsbFLFU5P7N+/36tW3+Fw9NikUKvVYrVaUSqVuFyu32QnUgkJCW/0ej0xMTFiI75Jkyad8ZCw2Wxm8+bNFBYWit6QE0ttT8STjCqXywkMDBRzYvpawelBEARycnKAzqrQ5uZm8XgNDQ34+PiI4moqlapbyYmfH6+0tFS8Hk/DvxMXhZ5WAFartdt+O6dCMk4uQNLT0wkKCuK7777jnnvuOeMy7G63m9zcXF599dVTehh+vj0jI4M//elPp+018YiknagnkJKSwpAhQ3C5XOzcuZOamhrxXB5r+2QUFBR02cejkeIhMDCQp59+mjFjxpCbmyv2hnj++edP63tISEj8uoiLixPbYdTW1hITE3NGj2+328VS5aioKKqqqtBqtRQVFeHj44PZbBaVYX19fVEqlbS2thIZGSn21OltR9+fU19fT3V1NXK5HIfDIS7mPEUSFRUVJCQkYLfbCQoKorGx8aThe0/ZtNvtpqamRuzDZjKZxDyWwMBAGhoaCAkJISEhgerqavz8/MT5/VRIxskFSHBwMGFhYRw/fpy2trbTFjfrCYVCwZNPPsnkyZN54403xNLlkyGTycjOzuaDDz7o0gOnL+zatYu1a9cCEBAQwI033sjjjz9OZGSkaI0vX76cAQMG0NTU1CvjpDs3Yb9+/YDOkrzvvvuO4cOHk5WVhcvlYsCAAbjd7tNK9pKQkPh1EhISgslkws
/Pj4aGhjNunAQEBDBz5kw2bNjAoUOHMJvNGAwG5HI5crmc6Ohorx40HmPl6NGjTJs2jaFDh572uXfu3IlMJmPQoEEcPHgQPz8/Jk+eTGJiIoIgkJ+fT15eHv369aO2tpaKiopee5U9Hhi5XI7BYBCbGQqCgNVqJS4uDqfTSVhYWJ9yFiXj5ALC7XZz7NgxPv/8c4qKihg+fDgGg+GsnEsmkzFhwgTGjRvHsWPH+PLLL9m0aRMbNmzwGjwymYzExETmzp3L//3f/+Hv73/a5xQEgf/973+i2M/ixYu57LLLRDef51wPP/wwAG+++Wav4pMnSxzOzs5GpVKxfv16Nm3a5LXtdBoVSkhI/HoQBAGj0Uhubi6HDh0iJiaGioqKM74g9KDX67niiitobGzk8OHDbNiwgba2NgIDA2lra0OlUuFwOETDwN/fn9mzZ5OcnHza3mqTycSxY8fQarWUl5czZMgQpkyZ4tW/LTs7m1GjRuF2u/nggw/w9/fvkj9yIp4y4e5CUiqViujoaIqKiggKCqKkpMRre2/zASXj5AKgsrJS7DD5008/ieWzl19++VnvrCuXy2lubmbz5s20trZ6xQOVSiX33HMPDz/8MBEREb/4XDU1NaxYsQKVSsUjjzzC5Zdf3u1+FouFbdu28d577wGdN+jJcm9OdhOFhoaKOTOSMSIhIeFpj5Gfn8+RI0eoqanBbrcTGRlJWVmZKGl/NgkJCUEQBAIDA8VOvQEBARQXFyMIAkFBQYwbN45+/fqddgKsh6NHj6LT6TCbzfj6+jJjxgwxH8SD1WqlsrKS3NxcmpqaGDJkCPv37+9RJM5jnHj+/+dzqydHJj4+nujoaHG/0tJSSb7+QsdisbBnzx4+/PBDNm7cSFVVlZeXIDY2lquuuuqcXEt0dDSDBg3ihx9+ICwsTHx/1KhRPPPMM2esHPmDDz6gurqahx9+mHvvvddrm91u59ixYyxZsoTVq1dTXFwslhpnZGT0WeTN0zvinXfe6dLkUEJC4reHpx3HgQMHOHLkCHa7HUEQUCqVYuO9pqYmJkyY0GOn3jNJVFQUR44cISQkBJfLRVFREdApp3/55ZefkbCS3W5nx44dZGZmUlBQwNy5c1Gr1WKn97q6OoqKijh8+LCYBBsUFERFRQVut5vU1NRTnkOlUolNYS0WC01NTeTn5wOdpcQeWQiHw0FJSUmvve+ScXIOcTqdFBcX880337Bq1Sp27drV7ao/PDyc559/XsybONvExMTwwgsv8Ne//tUrUdUjbXwmyM/P59133wU6OyC/+OKL4jaj0cj27dvJy8vr4vKTyWTMmjXrpJNFd4PdYrGQl5fHp59+KlXkSEj8hqmvr+fIkSPk5ubS2toqrvJlMhkajYasrCyOHDlCeXk5qamponT82aZ///7Ex8dTWVnp9RwICgo6Y/kuO3fuxGazkZOTQ1tbG0uWLBG3uVwuOjo6xNCMr68vkZGR+Pv7k5eXJ+Y+dodcLvfy6nsW1i6Xiz179mA2m4mOjiYnJ8dr/pXJZL0W1pSMk3OAzWZj9erVfPTRR2zevNmrg+6JyGQyBg4cyD//+c8+96k5E/xS92FPmM1mHn30UTFL+/333+/V5xQKBQsWLOAPf/jDSfcbNWpUl8RZQRCorq6mvb2d2NhY5s2bx5AhQ8RtO3bs4NNPP5X0UCQkfoW4XC6qq6v56aefKC8vx+VyodVqxW66Wq2W1NRUiouLxQ7BGRkZzJw586yH0k9Ep9P1yjtxOlRWVrJt2zbsdrvoLTlRckEmk6FWq1Gr1YSGhmK326mpqaGkpERMmO0pBKNUKgkNDaWlpQWDweDVv6yhoYGEhASOHz+OXq8XK4xsNhvl5eVSQuyFgNlsZsWKFXzwwQfs2LFD/KOoVCrS09OpqanxUv274ooreO+99/rcU+dCxuFw8PLLL7Ny5cpef8bHx4cBAwZw7733Mm/evFOGlbpLFCspKWH27Nm88MIL3HDDDVitVr7++mtMJpOYjCbloEhI/LrwGCXr1q2joqICq9WKQqHAz88Pl8uF1WrFbDajVqsRBIHKykp8fHyYMWOGKF3wa6Curo6vvvoKh8NBeHg4GRkZHDhwAL1ej8lkwmAwUFtbi0KhwG63c/z4cWQyGTKZjNjYWGbNmtXr55CnpBg6m8EOGDCA48ePc+mll2IymSgoKBBz/lQqlaRzcj5pb2/n66+/ZsmSJWzcuBGXy4VOpyMzM5NRo0ZxxRVXkJ2dzZ/+9Cc+/vhjoNPF9/bbb/9qDBOHw8GOHTt4/fXXWbVqVZeqmxNbaSsUCoKCgsjKymLmzJlMnDiRxMREr2zyk+FZCZ3oGs3NzSU+Pl7sWPznP/9ZkrSXkPgVU1paKubvCYKARqMRdUM8lYCpqamsXr0avV5PeXk5CoWCSy+9lEGDBonHOZ3eOhcK9fX1bN++nfz8fIKCghgwYAA1NTVs2rRJrExyu900NDSI2iRqtZqwsDCSkpJITk4mKSmpS5PY7vCUCwuCgF6vx2w2Y7VaSU1NZfTo0axevZr8/Hwx50+pVDJ27Fgv783JkIyTM4jFYmH37t384x//EI0StVrN0KFDef7558nOzha9AIcPHxb1PnQ6Hc8991wXRdQffviB8ePH9/ohfaFw8OBBnnrqKdasWSMaDFqtFpVKhcViwel0MmzYMK655hrsdjvjx48nKSmJ8PDw03KpJiQkEBAQ4GWcxMbGolAoyMvL4/7775cMEwmJXymlpaXs37+f/Px8sYu5Xq/HbreTmZlJZmYmERERaLVa1qxZg9PpZPTo0WzdupXo6GgyMzOx2WxoNBqsVisbNmxg8ODBZ1zn5GzidDrZvn07W7duRa1Wk5KSQnh4ODt37sRisSAIgij0VldXR2BgIAMGDCAuLo6EhATCw8N7ZZCcSFRUFHv27MFgMJCamsq+fftQqVTo9XpycnI4cuQIcrkcPz8/0XNVUFAgKo2fCsk4OQN4MqKfeOIJ9uzZg8ViQafTMW3aNO69916ysrK8DIy6ujoWLFhAbW0tcrmcxx57jNmzZ9Pa2srnn3/O7bffjkwmY/HixVRVVXH77befx2/Xe9xuN8uWLePxxx8XS/KmT5/O1q1biYmJ4aqrriIqKoqWlhY2bdrEm2++SUZGBgkJCaSnp5/RWO+sWbNobm5mwYIFVFRUdNk+YcIEYmJi+PTTT8/YOSUkJM4Nbrebqqoq9u7dK1be2O12dDodgYGBpKWlMXjwYJKTk8UwwuHDh8nPz2fYsGHs378fmUzG1KlTKS8v59ChQ0ycOFH0tOzYsYM5c+ZcFF3NrVYrX331FWVlZaSmplJSUkJ+fr4YviouLsbtdqNUKklISMBgMODn54fNZkOn0xEcHNxnw+REVCqV2FsnLCxMlMZwOp2MHDmSpKQktm/fTn19PWazWUqIPRd4lPXuv/9+tmzZgsViQaVSceWVV3L//feLAmAnYjabue222zh48CAAAwYM4O6770apVFJdXc13330nGiNTpkzhb3/7G6NGjWLw4MHn/Pv1BbfbzX//+1/+/Oc/i
yXAcrmchx56iDFjxvD000/zwgsvoNfriYiI4Oqrr+aKK65g+fLl3HbbbURGRnLbbbcxf/58+vfv/4uuxZOs9dlnn7F///5u99m6detp96mQkJA4f3i8GwcPHsRms+FyuXC5XBgMBgYMGEB2dnaX8Pjx48f54YcfsFgs4up93rx5YqO/kJAQLBYLISEhVFdX09zczE8//cSUKVPO07fsHVarlS+//JLGxkbCw8Ox2Ww4HA5iYmKwWCwYDAb69esnysobjUbUajUymQyDwcCGDRvYunUrgwYNYsyYMb3WIPk5nsICrVbL9u3bcblcxMfHU1RURE5ODmq1muTkZBwOBxqNhjVr1pzymNLsfJo0Njby8ssvc8kll/Djjz9it9uZPXs2y5cv54svvmDixIldDJP6+nruuOMO8Q8TFhbGa6+9Jsqo5+TkeIUmZs+ejdvt5q233vLqunsycnNz2bRpE2VlZb3Oiv6l/NwwSUlJITs7G7vdzpNPPsktt9wiVsp41ApffvllNmzYwJIlS7jnnnuorq7miSeeYOLEifz973/vsXGfhxNLnvV6PcnJyeJrf39/4uPjWbx4sVcFj0qlIjY2loSEBAYPHtzrxoISEhIXBvn5+Xz00Ufs2bMHpVKJTCbDx8eH9PR0br31VubMmdPFMCksLOR///sfQUFBaDQaOjo6GDZsGGlpaUBnON4jmaBUKhk4cCAqlYri4uJTtvWATs/5hg0bWLFiBVu2bKG6uhqn03nWJQw8HpOSkhISExMpLi4WF73V1dXExMRQVVWFRqMhODgYf39//Pz88PHxIS0tjYaGBiwWCx0dHWzdupV///vf5Obm9qgLZbVaqa2tFb+bR+Cyo6OD/Px8r+72iYmJ1NTUiI1kzWYzBQUF1NXVSb11ziZFRUUsWLCA3bt3IwgCKSkpPPHEE1x99dU9NmYqLCzknnvu4ccffwQ6kzifeuoppk6dKu5jtVq9Ekejo6O56aabePXVVxk5ciS33nqrKBzUnf6Iw+HgoYceYuPGjfj6+jJkyBDGjRtHVlYWiYmJotqfXC4/Y2XDDoeDjz/+mHvvvVf0mISEhHDttdeyfft2du3axebNm7n33nu59dZbRaNi8uTJVFRU8Mwzz/DGG2/g7+/Pq6++Sn19PU888QSff/45119/PXfffbeXhL8gCLz//vv873//Y9SoUYwZM4YJEyZw/fXXs337dvH3q66upqioCIVCQWZmJmPHjmXGjBlERERQX1/PoUOHMJlM/P3vfz8jv4OEhMTZw+l0snv3bjZs2ACAWq3GYrEQFxfH1KlTu80PcbvdbN++nR07dpCYmIi/vz+NjY3079+fqVOniqEMTy8bD4MHD2bfvn1ERUWxZs0a5s+fT0NDA4IgEBsb2+U89fX1HD58mPT0dIqKiti0aRO+vr6EhYUREhJCdHQ0MplMlHX3CL/9EnHL+vp6fvzxR4qLi0lISCA3N1c8h06nQyaTUVtbS1RUFBUVFaSnp1NYWIjRaESr1VJRUcFll13GunXraG9vJyMjg/r6er788kuCgoKYPn26V0jMYrGwYsUKamtr8fHxITg4mMGDBxMeHk5TUxODBw9mz549VFRUEB4eTn19PQ6HQ9RP0Wg0NDU1iX17eoNMuAgVqtra2vD396e1tfWs9Z7piT179nDjjTdSWFiIwWDgjjvu4J577jlp8tTu3bu59tprxRbT48aN48UXX2TkyJFeWeEvvfQSq1atYt26deKgaGpqYuLEiVRUVPDll19SU1PD0qVL+fvf/05WVlaXrPK8vDwWLlzIgQMHRENALpej0+nEfhG+vr5MmDABi8VCUFAQw4YNIzg4mEGDBhERESE2ceoJT/fJ9evXi4quJ3p2rrvuOmbNmsWCBQsAmDNnDv/5z38YN24cR44cAWD+/PkMHjyYFStW8OWXX5KQkMDq1at56KGHyMvLE6972LBh/O1vf2Pq1KmoVCpKSkoYO3YsSqWSmJgYDh06xKBBg/jggw+46qqrKC4uJisri7Vr17J582bcbjeJiYksXryYXbt2UVRUREtLi9hgCzin4+h8jl2JXw/nYxydr7HrcDj47rvvKCoqwt/fn7q6OgICAhg3bhxDhgzpNl9CEAQ2bdrEjh07iI6Oxm6309DQwNChQ5k4caK4iHQ6nSxZsgS1Ws2ll14qytbn5uaya9cuEhISaGlpQaVSiSJt48eP91qECoLA7t27OXLkCG63G4VCQWNjI1arFZlMJnqwPV4eT/6Hv78/er2exMREoqOj0ev1XX5XuVwuzlVGoxGj0SgaAXa7HV9fX9RqNa2trWg0GgRBQK1Wk5SURG5uLpdccgnr16/Hx8cHQRDo6OggMDAQjUbD8OHDSU9PZ926deTn5zNo0CAsFouYyDpw4EDGjx9PWFgYW7du5dixY0CnfkpSUpLY8K+kpITIyEhqamq47LLLxIa1arWasrIy/P39CQ0NFWX6N2zYwDPPPHPKcfSLPCfPP/88jz32GPfeey+vv/460Ln6f/DBB1m2bBk2m43p06d3KZEtLy/nzjvvZOPGjej1em666Saee+65Cz75aNWqVfzpT3+itLSUrKws3n333R5vDg9Go5E///nPlJaWEh0dzV133cXdd9/dbQVOYGAgRUVFNDQ0iL9XcHAwTz75JAsWLODuu+9myZIlZGZmMmfOHG644QYeeughQkNDxWOkp6ezdu1aNm7cyJ///GdRFr+jo8MrS9pjAHhQKpUEBgaSnJxMYmIiWVlZmM1mHA4HCoWCpKQkioqKEASBvXv3UlZWRkFBgVd4BWDYsGE888wzrFq1Snyvrq4OvV7PjBkzRONk//79vPPOO9x3332iF2fmzJmkpaXxyCOP8OWXX+J2u9m7dy/z5s1jxowZPPHEExw+fJiGhgb8/Px4/fXX+frrr3nllVf49NNPmTRpEsXFxVgsFmQyGdOnT2fZsmVceeWVlJWV4evrS2pqKrfeeiv79+9n3bp1Xtf+ax67Er8ePPPuifxax67VamXt2rXk5eWJImKjRo1i7NixJ61iLCwsZMeOHahUKqqrq/H39+faa6/1Cv9Cp9CjzWbj2LFj9O/fXzRO+vfvL4Y4iouLxTy4Xbt2UVhYyKRJk+jfv78YWho5ciT+/v7s3buX2tpacTGYlpaGTCbDbreze/dubDab6M3weJoLCwvF0mc/Pz+cTqeYmOvv7y+W3prNZi91W61Wi8FgoKGhAblcTnZ2Nrm5ubhcLvz9/XE6nYSHh6PX65HL5aSmprJnzx4yMjKIjY0lOTkZpVIpSuVv2LABu91OUFAQNpuN3NxcCgoKGDVqFHl5eeICNDw8HKfTSXJysthMtbi4GB8fH3x9fZk8eTIrV66krKyM8ePH09jYSH19PVarlaqqKiorK3v1tz/tUblnzx7ee++9Loma999/P99//z3Lly/H39+fu+++m7lz57Jt2zagM1fgsssuIyIigu3bt1NTU8PChQtRqVQ8++yzp3s5Z5X29nbeffddXnzxRZqbm1m4cCF///vfiYuLO+VnFy9ezN69
e5kwYQL//Oc/xdyL7hg1ahTNzc3s2bOH2bNni+9feeWVXH/99SxevJhbb72VlStXMmzYMB588EFWrlzJ3XffzbXXXktgYCAymYzAwEBCQ0NJTk6mqqoKPz8/fH19aWxs7FF4zOl00tDQQENDAzt37uSzzz7z2v5zBdbuGDZsGP/+979JTEz0UsH1uBmvv/56/v3vf9Pe3o7T6UStVncJLyUlJbF48WICAwP5+OOPsVqtWCwWvvrqK7Zu3cqdd97J3LlzWb58Oc899xzvvPMOOTk55Ofnk52dDXTGpdevX09hYSF//etfcTqd3HLLLSxatIjY2Fi2b9/O0qVLiYmJ8bpRfo1jV+LXhWfezcjIIDc3V3z/1zh2S0tLWbNmDbW1tQQEBOB0OpkyZcpJ51DozAHZtGkT/fr1o6ioiDFjxvSY7OnRP2loaODo0aNkZWUBnYu1qVOnsnjxYsxmMwcPHmTkyJE0NzfT0NDA2rVr2bFjB6NHjyYxMRG9Xk9SUhKVlZWUl5djMBioqKgQG93pdDoiIiKwWq3I5XIxlKTT6TCZTKhUKnGObWlpISwsDKPRSFtbmxiqlsvlyGQyFAoFgiDQ1taG0+nE5XIxceJERowYwf79+/Hx8cHpdIrh+/T0dPbv3y+GkSIiIsR8G+g00LKysoiKiuLzzz+npaWFIUOG4HA4KCgoEIs9fHx8SE1Npbq6mlGjRrF161ZxcRoREYHD4WDr1q1iTopSqWTXrl0MHTqUfv36YTabqaysFPvunIrTSog1mUzccMMNvP/++2IyJ3S6xz/88ENeffVVJk+ezPDhw1m8eDHbt29n586dAPz444/k5eWxdOlShgwZwsyZM/n73//OW2+9hd1uP53LOascOHCAuXPn8thjj2GxWHj99dd55513emWYmEwmfvzxR5566im+/fbbLjfVz70OERERhISE8N1333nlnqjVap599lmmTJnCkSNHuPbaa+nfvz9r164lMzOTRx99lFGjRnHPPfewdu1ampubOXjwIPv27QM681seeOABli1bxrPPPktUVBRBQUEMHz4cX1/fXv0OvTFMPvjgA4YPHw7g5U2aOXMmCoWCjIwMMjMzgc7GfMuXL+/2WHq9nrfeeovXXnvNy33a0NDAK6+8wpYtWwD49ttvefvtt7niiis4duwYAwYMEN2gTzzxBC+88AJOp5OgoCCxb9ATTzzB7NmzaWxs9DJMfo1jV+LXxYnzrqeZGvz6xq7b7Wbz5s18+umnYmK/Xq/nhhtuOKVhAlBRUUFISAhGo5FrrrmGiRMnehkmP1+khYeH4+vrS1NTk9j8Djq91rNnz0ar1aJUKiksLCQsLIyBAwciCAJms5m1a9fy/vvv88UXX3Do0CEaGxtRKpVibkp9fb1YKVNUVCQaLIAYZpHJZKKHIz4+nrCwMNRqNZGRkYSGhqLT6fD19aVfv34MHjyYlJQUUXoeOmURJkyYgEKhQCaTiecKCgoiNDSUjIwMnE4nHR0dhIaGcuDAgW6TXg0GA1dffTUJCQns37+flpYWpk+fTmJiIjqdDrfbTWlpKQqFgn379qHVaomNjUUul2Oz2RgwYAAVFRXU1dUREhKC3W5n5MiRqFQq1q5dy+rVq8X+Rr3htIyTu+66i8suu4xLL73U6/2cnBwcDofX+/379ycuLk7sX7Bjxw4GDRrk5W6cPn06bW1tosv/59hsNtra2rz+nQvWrVvHvHnzWLduHdHR0bzzzjv86U9/6jHp9efo9XqWLVvGX/7yF6/mdIIgcPDgQR599FGvXI2QkBAxRvjzni8REREsXbqUGTNmkJOTw1VXXcXhw4f56KOPWLt2LSNGjOC///0vs2bNYvTo0TgcDt577z0CAwMpKyvjkUce4aGHHqK9vZ2goCAMBgNPPfUUW7Zs4bPPPuMvf/kLc+bMwd/fn/79+5OWloZcLqdfv34ndfsGBweLgmtDhw4V309ISAA6mwdOmDAB6DSyPO5Rq9XKX//6VzZt2uQlkHbw4EHuvPNOnn76aWbNmsXbb7/tZQh2dHRQW1srvn755ZcpKChgxIgRYu4MwJEjR0R36KxZsygpKWHGjBm89tprOByOLpVMv7axK/Hr47cw7zqdTr755ht++ukn5HK5WIlz/fXXiyGXUxEfH8+AAQO48cYbSU5OFvPyPKrVO3fu9ErK9Oxjt9u7JGv279+fq666SvR+lJeX09jYyJVXXikmjIaHhxMcHMyBAwfECpjc3Fyys7PJzMykf//+XrmB/v7+ZGZmEh4ejp+fnygjYTQaycnJwWg0UlZWRlVVFRaLhbCwMCIjIwkKCqKkpISjR4/icrkIDg5m7ty5TJw4EblcLibDNjc309TURFxcnBiu12g0FBUVIZfLKS4u5vvvv2f37t1iCGnTpk28/fbbfPvtt6IwW1VVFT/++CMul4vExESUSiUOhwO3201rayuNjY0kJiYyZswYsZLHYDAQFRVFfn4+crmcmpoaNm/eTFtbG0FBQWRkZJCRkdGrv2OfjZNly5axb98+nnvuuS7bamtrUavVXlY9dFqmngdKbW1tl1Ivz+sTHzon8txzz+Hv7y/+6y5j+kyzadMmfv/734v5JStWrGDBggV91sbwhFqgc0VQU1PD4sWLmT17NsOHD/equpHJZPTv35+ysjLWr1/f5VgtLS1MnjyZ++67j+bmZm677TbuuOMOoqKi+Pjjj1m7di3Z2dmUlJSwaNEiL+MnKSmJjo4OnnvuOXJzcyktLWXBggUsWbKEI0eOEB0dzbvvvsukSZNIS0vjiSeeQKVS9RgKkslkDB8+nC+++IInn3xSTLb14PnMiBEjvAbjrFmzRK9KdXU1M2fOZOTIkXz88cd8+eWXXHnllbz77rs899xzPPnkkyxcuFA0Ek8szQ4LC+O+++7D5XLxzjvvoFarxZyZE0lPT2fcuHEsXLiQqqqqHv9Ov6axK/Hr47cw75rNZr7//nvq6upwu904HA7GjRvH/Pnz+1TZolQqSU9PFxeRZrOZ3bt38+mnn3Lo0CFUKpXXgsfHxweVSkViYiIHDhzwOpYnzGI0GqmpqSEqKoqQkBC+//57YmNjueGGGzAYDBQVFTF48GDGjx9PTEwMvr6+WK1WGhsb0Wq1OBwO4uPjmTZtGk6nk2PHjokiaSNGjECv16NWq8XeM57Qj16vJzAwkJKSEnbu3InJZEKpVDJs2DBuv/120tPTvZ5JgiAwbtw4dDqd6GXS6/VERUXR3t5OZGQkgYGBHD58mFWrVrF06VI++eQTtm7dSkREBIIgUFtby/Tp05k8eTJyuZyKigr8/f0xGAy43W7q6+uRyWQEBARw+PBhoqKiUCgUNDc3I5fLaWpqEsNNxcXFhISEMHToUCIjI7s0HzwZfXrSVlRUcO+99/LJJ590W8p6tnjsscdobW0V//W2FOl0EASBlStX8rvf/Q6j0chf//pXNmzYIMYifwk7d+5kwoQJLFq0iAULFjBv3rwu+1xzzTUYDAY+++wzL6PAZDKxYMECHn30UeLi4vjmm28YO3Ys//3vf5k
9ezZr1qwhIyODVatWsWzZMsaOHUtzc7Oom5KcnMzYsWMZNmwYCQkJKJVKWlpaeP311/nHP/7BPffcw9ChQ/n+++/54Ycf+MMf/oDNZuvSQVmtVpORkcHrr7/OmjVruu2e7HQ6WbFiBdC58jhxYpkyZYqXyJonSeqmm25i/vz5lJWVoVAouOOOO3j11VeRyWSkpqaydOlSHnvsMTEJrrGxkbi4OBYuXIjL5UIQBHQ6Ha+//jpz5swhOTmZIUOGkJWVxVNPPeXlnVEoFCiVynMiwnYux67Er5PfwrzrKVVVKBQkJycTERHB7373OyZMmNBFL6qv7Ny5k6qqKvH6x4wZ4xV2ViqVZGZm0t7eTlNTk5eC6bFjx9i6dSuhoaHo9Xra2tooKipi9OjR7N+/n59++onBgwczffp0CgsL2bJlC4GBgYSEhFBTU4NcLufYsWMIgkDF/2vvzKOjKLP+/0nSnc6eTiArWxIgbNlI2BLZZRUlKExQBFEUB0RFEOUMOCLvK7KIzigMjPoTEEYElFEgBEJkTQgQCISQhaxkhexk7U6v9fuD0/USCRAgCsH6nMM52l3pqqfqVtV97nPv9xYWEhsbK0Y3QkNDGTZsGEVFRWJ/ML1ej16vR6lUEhwcTGlpqahdAogClqblppspKyvj+vXrlJSUIAgCzs7O4nf9+/fH3NxcFKAzaZIUFBRQUlKCXC4Xo8khISG4u7szePBgZs6ciZubGxcvXkSv1+Po6IhCoRAnp42NjXTu3JmwsDD0ej01NTXU1dXR2NiIQqEgICCAhoYGkpKSSEtLIz09/fdJiE1MTKSsrIzg4GDxM4PBwIkTJ1i/fj3R0dFotVqqq6ubePGlpaViSM7d3Z2EhIQmv1taWip+1xwKheK+levuBZ1Ox+7du3nnnXfQarX861//Ytq0aQ8k7Xsz/fv3Z/fu3VhZWYlhst/SpUsXBg4cyNGjRykpKRFLlLVaLUVFRRgMBr744gvi4+PZtWsX33zzDRs3bmTKlCkMHz6cVatW8dxzz/H0009z8eJF1q9fz9GjRzl27BharZaXX36Zjz76iMLCQrZv386RI0fw9fWltrYWQRDo3LkzFy5cuKX/gVwuJyAggA8//JCRI0feUSclJydHvAmnT5/e5DsHBwcWLVrEggULmnjQpgZSJsfk008/beLUKBQKPvzwQ4KDg3n11VeprKxk5cqVLFq0CAcHB86fP49Go2HgwIH8+OOPqNVqTp8+zYQJE9DpdLi4uFBeXt4ksdeU1+Ps7NzmbVfi8eV2z114PGy3oqKC/fv34+zsjI2NDcXFxUybNq3VeooFBASQnJxMWFgYffr0abZ8tUePHiQmJqJUKsnKyhIjwXV1ddTV1REYGEhKSgrdunWjf//+REVFiVorUVFR+Pr6Eh4eTnV1NRcvXiQzMxOj0UifPn1wdXXl7NmzdOzYEYPBICbJ3qxSrdfrsba2pkuXLshkMmpqakhLSxOfid26dSMkJARvb+/bOmupqaliPkr37t2bPKO9vb3x9fUlPz+fxsZG2rdvj7+/P+fOnSMoKIiUlBRUKhVTp07F1dVV/Dt3d3emTZtGTEwMmZmZBAYGkpmZSVFRkaiZUlFRwciRIwkMDKSsrIzo6GhsbW0pKysjKSkJQRCwtbWlc+fOWFpailVKd+Oepo5PPvkkly5dIikpSfzXr18/XnzxRfG/5XJ5kyWJjIwMCgoKCA0NBW54rZcuXaKsrEzcJiYmBgcHB3r37n0vh9NqCIJAfn4+S5cu5ZVXXsFgMLB161amT5/exDHJzs4Wb+j7wfSC9/X1va2BWVhYiH1hTMlscCM0161bNwByc3P59NNPsbW15d133yU+Pp4PPviAM2fOMHr0aBYuXMilS5fEPJTz588TGRnJhAkTxKhQYWEhy5Yt4+TJk/z3v//l0KFDbNu2jYULFzbJj1EoFIwYMYKffvqJX3/9lYkTJ4pGfzsVxD179lBVVUVISIh43W/mpZde4sCBAwwePFj8zMzMDC8vL5YsWXKLY3LzuQkPD2f37t14enpSVlbG8uXLUavVZGVlcebMGXE7Ozs7jEYj5ubmfPDBB/znP//hu+++48KFC1y8eJGkpCSxE2lcXFybtV2Jx5/mnrum/K62bLv19fUkJCSwadMmKioqMDMzo6GhgWeffVZ0TARB4MKFCyQnJ9934m779u0ZOXIkI0eOvG3XdxcXFxQKBTY2NqJkgulzCwsL5HI5NjY2HDt2DGdnZ2bPnk1wcDCNjY20a9eO4uJiNm/eTFpaGsOHD2fOnDmMGzcOnU7H2bNnEQSBU6dO0dDQwMCBAxk6dCgvvPCC2IHdpFUCN57vlZWV6HQ6fH19iYiI4Pnnn8fX1xegSRmyCYPBQEpKCn369KGwsFAsTDChUCiYNGkSgYGByGQyqqqqxEloXFwcMpnsFsfEhIODAxMnTqR79+5cunSJHj164OnpSW5uLiqVShSAa9++Pb1798bDwwNLS0s0Gg2Ojo64u7vz0ksvERERwbhx4/Dw8GjRdXtgEbbhw4cTFBQk6pzMnTuXqKgotmzZgoODA2+99RYA8fHxwI2TGBQUhKenJ2vWrKGkpIQZM2bw2muvtbikrbXEgHQ6HZcuXWLv3r1s2bKF/Px82rdvL+aE/Javv/6aL774gtmzZ/P666+3ODH2Xjl//jxDhgzhxRdf5OuvvxY/f++991i7di1ww1k5fPgwAwYMAG7cxImJiaxYsYI9e/bg7OzMyy+/zJw5c8SEL51OR15eHnv27OHbb79Fp9PRs2dP/Pz8OHPmDOnp6VRWViIIAh07diQiIoKJEycSEhIiOgsqlYr09HSOHz9OfHw8q1atEp0muOGwPPPMMxw7doytW7fyl7/85bbjrKioYMaMGRw8eJB27drx5Zdf4unpiZmZGQqFgt69e9/2+sbExPDaa69RUFAgftavXz/27NkjytLHxsYyZcoUsaSwX79+7Nq1i7KyMv71r3+xbt06NBqNaEdtyXYl/twMGTKEuLi4Nmm7paWlnD17loKCAqqqqvD39yc7O5vu3bvz1FNPNYko63Q6Dh8+TFpaGg4ODjz55JN06dLld1mSjYqKoqamhuLiYubMmYOdnR1qtZoNGzagUqnE8t+goCDx/aBWqzlx4gT5+fn4+flRU1NDdnY2QUFBBAQE4ODgQGlpKZcvXyY5OZnq6mqxE/K1a9dITU1FqVTi4+NDaWkpOTk52Nvb4+PjQ0hICJ06dRKrZAoLC0WRN7lcLkonwI28oR07dtC3b1+uXr1KRETEbcXpsrOz2b17N4IgoNVq0ev1+Pj4iIJuPXr0oEuXLre833Q6HXv37hUF2/Lz88Uk6fDwcHGyFxkZyeXLl6mrq8Pb25va2lqeeeYZysvLiY2NJSEhgV27dt3VjlrdOTGJAf3www9NxIBuDh3m5+czd+5cUeZ35syZrFq1qsViQA9ykxgMBoqKijh79ixbt27l119/FZX8XnzxRd566y369+/f7N/q9Xo+/fRTPv74Y/r27Ss25WstKXgTarWa0NBQampqOH
XqlHjufvnlF6ZMmYLBYEAmk7FgwQLWrFnT5G/r6upYt24dn3/+OZWVlbi6ujJu3DgmTZpESEgInp6eoud87tw5ysrKiI2NpaysTCwLHzFiBOPGjcPFxQWNRkNxcTEZGRlkZGTw/fffYzQamT59Os8880wTxwRuGObUqVMZO3YsO3fuvOt6cUpKCuHh4eTm5jZ54MhkMnx8fBg/fjwLFixoNhlv3759zJo1q8ka8WuvvcYXX3yBjY0NKSkpjBgxgoqKCkaMGMF3331HbGwsS5cuFcv54P8UYh9125WQMPFb5+RRt92qqipSU1O5cuUKxcXFKJVK7OzsqKioEJc/Ro0a1eyx1NbW8ssvv6BUKikuLsbd3R0/Pz+6dOnSqp3Ms7OzOXDgAA4ODvj7+4vLaP/5z38oLy9HoVBga2uLRqNh5syZTZa8ioqKOHDgAC4uLnh6eorjdHFxwdvbm27duokim1euXMHS0hILCwtSU1PF94+rqyv+/v50796d69evk5eXx/Xr17GysuLatWv079+fkpISHB0d6du3r3iuBEHgp59+wsHBgczMTCIiIm4bITIRFxfHiRMncHV1paysTNRFMTc3x8LCAltbW4YPH35LZY1OpyMqKork5GQsLCwYPHiwKHZnqqg6fPgwxcXFXL9+HXt7e/z8/MjIyODKlSsolUqys7NZt27d7++cPAzu5yYxGAwkJSXxxRdfEBkZSW1tLQaDAWtrawYOHMizzz7LrFmz7upo3OygGI1GevTowYsvvsgLL7yAm5vbAydvwQ1je/vtt9m4cSPbtm3jhRdeAG4kPJkk67du3YqXl5fYL+dmjEYjGRkZ7Ny5k8jISC5evIggCCiVSgYMGMCrr76Kj48P3t7eTc6PhYWFKJSTl5dHYmIiJ0+eJCUlhXbt2jFs2DAGDx5MeHi4WLJ7M2q1mqeeeoqMjAwOHz5Mr169WjTeXbt2MXPmzGabG7Zr147t27czZsyYZv82MTGR119/XdR0MTc3JyIigtWrV9OpUyex1HrNmjWcOnWKGTNmoNFoUCgUeHt7c/ny5T+FBLjE40Vbka+vq6sjLi6OrKws3N3dcXFxoaysjIyMDOzs7LCxsSE8PPyuof7a2lr27t0rvnSzs7MxMzMjKCiILl26tHip4E7U19fzzTff4Ofnx9WrV8XqzMTERJKSkmhoaMDW1hZ/f3/69+9/S+sQlUrFmTNnuHTpEubm5mLl4PXr1ykrK8PZ2VnstWPSSVGr1SgUCjGCW1dXR2lpqRipsbCwYNiwYZSWluLn50f37t1veccUFBSwZ88e0TEySTfcCYPBwM6dOykpKcHZ2Rl7e3u0Wi1ubm5kZmZibW0t6rz8Fp1Ox9GjR0lISMDa2hp7e3vMzc2pra1lxIgReHl5sWfPHgoLCxk+fDh5eXk4Ojpy4cIF5HI5Fy9e/GMiJw+De71JKioqWLNmDf/+979paGigY8eODB8+HFtbW1544QUGDRp0T06FwWDg8OHDLFu2jMTERPR6PS4uLoSEhLBgwQKGDBkiZjTfL6dPn+bJJ58kLCyMAwcOIJPJ0Ov1TJo0iSNHjnDixIm7VhCZRH7i4+PZv38/p0+fJjs7m7q6OuRyeRMBPRM6nU5UJfTx8WH48OEMHz6cJ554QmxgdTvWr1/P+++/z+eff86cOXNaPNaamhoCAgLEJRqZTEa3bt2YOnUqffr0QSaTERoaipubW7P7z87OZubMmZw7dw5BEDAYDPTq1YshQ4bQs2dPSkpKRIfJzs6O+fPnExYWhre3Nx07dnzkH/ASEr/lUXdOBEEgPT2dEydO0LFjR1Gj49q1a5iZmdGtWzf69euHh4dHiwsO6urq+PXXXyktLaVnz54oFApycnK4fv06Hh4e9O3bF09PzwdqqBcZGQncSOqPiIjAw8OD0tJSfvjhB7y9vbGwsGj2hf3b48zKyiIzM5OrV6+KZd5VVVUoFApRTt9UbmxKLFUqlajVajGS4ubmhr+/Pz169MDe3r7ZZ5+p8aq9vT319fVMnz69xe+y5ORk9u7di7W1NZ07d+by5cvo9XpRxdZU8ejv799sYnRiYiIHDhwQpfeDg4PJz88XOxHX1tZiZWVF7969sbS0FFu45ObmsmjRoj+3c2LqAzN37lwuXrxIz549+etf/8pzzz0n5iU8CPX19Zw9e5Y9e/Zw8OBBsrKyRKGx8ePHi70c7mfZp66ujv79+1NUVERMTIyY2BYdHU14eDhr1qzh7bffvuNv5OTksHbtWpYtWyZKJxcXF1NQUMDZs2cxGo2kpaVRWFiIq6urmGRnZ2fHwIED8fHxaTZC0hzZ2dmMHz+e3r17s3PnznsqeTQYDGzatEks+R04cCBvvvkmmzZtIjExkYaGBjw8PPjoo49Eye3fYirxEwSB1NRUfvjhB3JycsjJyWmStDt37lxWr16Nvb39I/+Al5C4HY+y7TY2NhIXF0dBQQHdunUjPT1dzGXw8vJCqVTed76e0WjkypUrXLhwgaKiIuzt7QkMDKS+vl6U8zf1B+vcufM97yc9PZ3Y2Fi8vLzQaDQ888wz4rKJWq2mtraWefPm3XGSFhcXR1lZGX369KFz586UlpaSn59PWVkZV69eRRAEsZmfmZmZuJytUCjw9PTEx8eHrl27tujYExISuHjxIhqNhkmTJt2xAe1vqaurY//+/WRkZODi4kJNTQ0ajQZLS0t0Op14fKbl9d9qWcGN5356ejpGo5GqqipKS0uRyWQ4ODigVqtpaGjA2tqa2tpawsPD8fHxISoqihdeeOHP65yoVCo2bNjAqlWr0Gq1LFiwgEWLFmFvb8+ZM2f49ttv+fDDD+/pYt6J69evs3PnTtavXy8q+Nna2tKpUyfGjBkjJpY6Ojo2a9hVVVUcPnyYYcOGiRnTa9euZfHixQwZMoR9+/aJ3vGIESNYuXLlLUqRv6WyslKUNv7HP/7RrMNgKuE1MzO770hPbW0t48ePJy8vj5iYmCbZ/9XV1dTW1tKxY8e7JrHl5OQQGRnJtm3byMnJuUWsR6FQ8Pzzz7N8+fJml7NuxnSzLFq0iP/85z9i6aVcLqd79+6EhITg4+PD8uXLH8kHvITEnXhUnZOSkhIiIyNxd3fHxsaG/Px8wsLC8PLy4vjx4+Tk5DBs2LBWqRBSqVSiOJmZmRl9+vTBycmJw4cPo1arEQRBVIvt2rVrsxGVtLQ00tLS6NatmyhLv3nzZsLCwsTotY+PD5mZmZw+fRo7Ozuee+65Ox5XSkoKBw8exNHRkUmTJjVpzGqSL6irq0OlUmFlZSVWR97rMzgrK4tjx46JImvDhg0T95GRkUFlZaWYlHs7dDodZ86cISUlhcbGRuzt7Wnfvj06nY6ysjLRuVCr1YwePZrAwMDbHqPRaKSiooKSkhKOHDlCY2OjmJOYmpoqRlgyMzPZsmXLn9M5ycnJYcmSJfz000+4urry1Vdf8fTTT4svx6NHjxIeHk5wcDBbt25tUZ+cllJVVcXJkydZt24dx48fF8vf5HI5Pj4+PPfcc0ydOrXJPk3NnMaMGUNFR
QXz58/npZdeAmDixInExsayfPlyFi9ejFwuF1tUtyR8uWnTJt5++23WrVvHyy+//EBLTc1hNBpZv3497733Hl988UWT5ZyrV6/y3HPPkZOTw5QpU/joo4/umqgFN2ZeZ86cYd++fXz77be3OClBQUEsXrwYT09P+vXrd8cZRn19PUuWLGHdunW33eZRe8BLSNyNR8050ev1nD17llOnTuHp6Skua48cOVLcdt++fWi1WlQqFT179iQ4OLhVNKT0er1YDVNRUYFcLhd1mgwGA2q1GicnJ3x9fZvkppiZmaHRaDhx4gQWFhbY29szePBgZDIZR44cISwsjLi4OKZMmYKTkxMJCQl4eXndNb9Fp9OJjVNlMhmTJ09udb0YtVrNjz/+iI2NDfX19bzwwgviPi5cuEB0dDQajQZbW1ueeeaZJo3+mkOv11NQUEBKSgpZWVmoVCqMRiN2dnY4ODhQUlKChYUFnp6e2NraMmTIkDu2EygqKmL79u1otVo8PDwoKioSJ8FpaWl/vpwTQRCIi4tj9uzZZGZmis7Hb710g8HAd999x1tvvYW7uzsbN25kxIgRD5zMavIc9Xo99vb2nD59mh9//JGtW7c2abRka2vbREvEzMwMPz8/Ll26xNWrV5HJZPTs2ZMVK1aICbepqamsWbOGefPm3VMZXW1tLSNHjiQ/P59Dhw416X/zoNTW1rJ+/XpWrFhBcHAw+/fvF69HSUkJL730EjExMeL2o0ePZtu2bS1yUODG+bx06RLbtm2juroajUZDTEwMpaWlYsvwLVu2EBERccffKS0tZe7cufz888/Nfv+oPOAlJFrKo+ScmBqcFhYW4ubmRnFxMf369SMsLKzJM7W+vp6dO3fi4uJCbW0tdnZ2PPHEE00iC/d7XNnZ2WJZ7OnTpykvL8fT05OuXbuK8v75+fm3tOKwsLDAaDSKOX3m5ub06NGDdu3aUVVVRceOHcnKymLq1Kn3tFSdmZnJ8ePHcXZ2RqlUMnLkyFaZGJqWtY4cOSJ2ph45ciRdu3bFaDSSkpJCdHS0uBxfU1ODTqcjPDz8rg6KCbVaTVxcHMnJyWIvM3d3d3x8fNBqtWi1Wuzs7Jg2bdod30XJyclERUWh1+uxs7PDy8uL5ORkUlNT/1zOiVqt5vvvv+f9998XE4NWrFhxWy/XYDAQHR3N7Nmzqa6uZsqUKSxfvlxsWNdSGhsbUalUpKWlsW3bNg4ePIhOp8PT0xNBEMRysPs5zTY2NsyaNYtp06YxadIkVCoVX375JTNnzrwnByUmJobJkyczaNAgdu7c2Wwi7L2gUqk4cOAAn3/+OadPn8bDw4Mff/yR0NBQ8UG1bNkyUlJS6NixI127diU5OZnr168zatQovvzyyxZX8sCNJbOff/6ZDRs2kJKSglarpWvXrnz44YdERES0aFaSm5vLa6+9xrFjx265Fo/CA15C4l54VJyToqIiIiMjaWhoICAggLS0NEaMGEFAQECzv2F6PshkMuRyOXl5eYSEhBAcHNzikma4IURWUVFBcXExOTk5ODk5IZPJKC8vp6GhAVdXV1xcXEhNTcXZ2ZmgoCAyMjLQ6/VNBMzq6uowNzfH2dmZqqoqUcFUqVTi6uqKSqWipKQEHx8fJkyYIAo83g1BEPjll1+Qy+WUlJTw5JNPNqmOvFeMRiP5+fmcPHmShoYGwsLCRI2UUaNGUVRURFxcHLm5uTg6OqJSqdDpdJiZmYkRkP79+zNo0KAWlV/X19eTnp7OmTNnqKqqwtHRUVzicXFxYfz48S3KR0xMTOTQoUNotVqUSiX29vYcOnSInTt3/jmcE1PvicjISORyOWvXrmXWrFktemklJSUxf/58Tpw4QdeuXfn444+ZNGlSi73klJQUVqxYwcmTJyktLRWbNbm4uNCrVy9cXV0JCgpCoVBw5swZ4uPjxQSilmBubs6rr75Kv379mDdvHvb29mKN/ZIlS1r0YDIYDCxbtozVq1ezZMkSli9f3qJ9/5by8nKOHTvGP//5T1EKe/DgwWzcuBE7OzuioqLYtm0b8fHxGI1GAgMD2bJlC/7+/qSnp7N06VIiIyPx8PDggw8+YPr06be90U0zhHXr1rF//36uXLmCwWDA19eXJUuWMH78+GbVDO9EZWUl48aN49y5c00+f9gPeAmJe+VhOyf29vYkJiZy5MgRPDw8qKiowNnZmXHjxt01MmowGDh+/DhXr17Fz8+PgwcPihWULc0BjI2NRaPRkJOTg6OjI+3ataOuro6rV6+i0WgwGAw4ODhgbm5OVVUVWq0WS0tLsdOxi4sLTk5OFBcXo9VqKS8vx9HRkc6dO1NcXEy7du3EdiHXr19HLpdjZWWFjY0NEyZMoHv37i06Xz/99BPdunUjOTmZ11577Z57I2k0Gi5fvsylS5eora0lICAACwsLLl26hJeXFzY2Nly+fFls3mh6rwwbNgxfX1/S0tI4f/48gwYNory8nLq6OoYMGSJ2LL4ZQRCoqanh4sWLJCYmUltbi6WlJR07dhTVwPv160fv3r3vaZXBVBWk1+sxMzMjMzOT7du3P/7OSU5ODi+88AIZGRl06dKFTz/9lMmTJ99TZOHq1at88MEHbN26FQsLC0aPHs2KFSvw9/dv0e9otVpKSkpEMTBHR0c8PDxQKBS3rKlWVVURGRnJgQMHOHToECqVisbGRmQymaji+lvMzc1ZuXIlP/zwA0lJSdjY2IidHr/99tsWebB1dXWEh4eTkpLCoUOHxI6Vd8NoNFJcXMzGjRv58ccfKS8vx97eHn9/fyIiIujatStRUVFs3bqVkpISUcHw2WefZc2aNU1mC7W1tWzcuJG1a9dSXV1Nv379mDp1KlOmTMHW1hZ7e3saGxs5ceIE27dv58CBA2LUycrKihdffJGPPvrogZKYs7KyiIiIaNJ9VHJOJNoaD9M5qa6uJjk5mcOHDyMIAnK5nG7dujFx4sR7qo5JS0sjOjqa0tJSLC0tsba2JiQkhAEDBtzSYfm3mJ5LhYWFVFZWAjdk1t3c3Gjfvj02NjZi3ktjYyO5ubmcOXOGiooK1Gr1HZdYrK2t8fHxoaKigry8PGxtbXFycsJoNFJTU0P79u0ZO3YsPj4+dx1jXl4e0dHRODk54ezsfNciBhO1tbWkpaURFxeHwWBAp9Ph5uZGdXU1jY2NODk5ibIPDg4OyOVy5HI5169fJywsjCFDhohjzM/PJyYmBnd3d9zc3EhMTEQQBAIDA7GxscHd3Z2amhqSkpLIzc1Fp9PRqVMnDAYDxcXF2NraMmLECIKCgu47R+jcuXMcPHgQg8Hw58g5OXHiBLNmzSI3N5ewsDA+//zz26q73o3GxkbWrl3L2rVrqampwdHRkeeee4758+fTq1cv5HJ5qyaTarVaKisrKSws5OrVqzg4OIhtp03U19eLXYKVSiXOzs4kJSWhVCr58MMP2bRpE506dWL9+vUtulHOnTvH+PHjGT9+PJs3b76roV25coVPP/2U/fv34+HhQVhYGDNmzECn03HhwgW2bt1KUlISKpUKuOFEeXt7M336
dBYtWtRsVMQks//+++9z8uRJdDod7dq1w9LSkt69e3Pt2jWysrLERGILCwv69u3LqlWrWqVDqek8TJkyhfz8fEByTiTaHg/TOTlw4ABnz54VxQwHDx7MsGHD7ktSvqKigh9//FHMIzPlkgUFBdG9e3exG3BrIAgCFRUV5OTkUF5eTmVlJTY2NpibmzdZ6tHr9ZSXl9OnTx9UKhXZ2dm0a9eOiooKGhoa8PHxQRAEevXqxeDBg+867kOHDlFWVkZlZSUzZsxo0i34t+j1ehITE4mNjUUul1NaWorRaCQ0NJTCwkKqq6sxNzenb9++6PV6MbpTUVGBq6srY8aMuUW1G26kPcTGxpKXl4e3tzfW1tbk5OSQn5+PTCZDp9NhZWWFTqdr4tT16tWLESNG3PGYW4rJbtLT09mxY8fj7Zx07dqVgoIC3n33XRYvXnxXb/tuCILAgQMH+OSTT4iPjxdVVb28vBg0aBBPPfUUgYGBODg43LYkuLUxGo0cO3aMlStXcuLECfGlPWDAAFavXs27776L0Wjk22+/bdK19HbjM6nbfvXVV6LybHNERkayYMECnJ2defHFF8UeChMnTmTChAkUFxcjCAIuLi74+fmJ1UazZ88Ww6l3or6+nsTERPbs2UN6ejqhoaH8/PPPJCUlibX1Y8aMYfDgwUyYMKFJAnFr8PHHH/P3v/8dkJwTibbHw3ROVq1ahUajEZdxTL277he1Ws2BAwdIS0vDzs4OJycnCgsL0ev12NjY4OnpiZeXFw4ODnTo0EHU0bjXfZoiH+bm5nd8ngiCQHV1NbGxseTm5iIIAt27d0epVHLs2DE8PT3FaIa/vz+DBw++47GY8iFtbGyQyWQ8++yzzU6y1Go1v/76KxcvXkSlUqHVaunevTv29vZYWlpSUFBAWVmZ6BiVl5ejVqtxcXFhyJAheHl53dWRMyl/m86vTCZDJpPh7OyMpaUlaWlpWFlZ4eLiQlhYGL6+vq32ntNqtWzYsIFff/2V/fv3P97OiUwm429/+xsffPBBq/ZYqK6u5sMPP+Sbb75pIqluZmaGs7MzdnZ2+Pv7Y2dnx7Bhwxg0aBAeHh44OTnR0NBAZWWluEaXn59PQUFBkyRMlUrFyZMnsbS0ZMaMGYSGht7VsdJoNBw+fJh//OMfHD9+HJ1OxyuvvMKsWbOYO3cujY2N7Nq1667VOPX19bz88sskJiby66+/ihLLN/Pdd9+xZMkSsStzTEwMEydORK/Xs3PnTjIyMnBycuL5559n8eLFoobJ/RqxIAhkZWUxc+ZMTp8+jYuLC4sWLSIsLIzAwMBWa51+M/Hx8YwZM4aGhgbJOZFoczxM52Tfvn2Ulpby3HPPPXByvQlBEMjJyWHfvn3Y2dnh6+tLamoq5eXlYlKnaQnJ3NwcJycnunTpgpubG87OztTX14uVkoIgUFVVdUshgtFopLq6GgsLC3x8fAgICKB79+63TcQVBIHi4mJOnjxJVlaW2MunqKgINzc3zM3NkclkeHh48MQTT9xxQmZKHDY3N8ff3/+Wbu0ajYadO3dSWlqKwWDAzc2N/v37c+TIEQoKCnBwcMDZ2ZnKykrs7OwIDQ3F29tbPBf3+v5Tq9VUV1eTkZFBXFycqKxdW1tLcHAwYWFhLYrG3yv79u3j+++/f/wTYpcuXcqHH37Yqo6JCYPBwIYNG1i9ejXFxcV33NbkabZr1476+npKS0vFkjWdTndL+drNyOVyevToweLFi5k0adJdPd/GxkZWrlzJypUrEQSBdevWMWjQIDHPpiUOyoULFxgxYgTTp0/nn//8p3hz6nQ6Nm7cyMaNG/n4449Rq9WcOXOGXr168fXXX3Pp0iXgRvv1DRs24Ofn98DdQY1GIydOnGDmzJlNOgybmZkhl8v56KOP+Nvf/vZA+7jdfnfu3Mm0adMk50SizfEwnZN169Yxffr0B45UN0dNTQ3//e9/KSwsxMHBgY4dO5KWliYmepomQCbNjN+KSJq+NzkzN2NSYzV9bmZmhre3N2FhYc1O0kyYnJT9+/eLS+/Ozs64ublRV1eHhYUFHh4ed42gREdHc+3aNerr65kxY4YYvSkvL2fPnj1cvXpVdIDMzc25ePEiMplMlLN3dHRkwIABDBgw4IGveX19PceOHRN7kjk7O1NdXY2NjQ06nQ57e3tee+21Vn+3qlQqli9fzpo1ax5v56S6urrVw/2/JSsri3HjxpGbm/u77sfS0pLg4GBWrlzJ0KFD7/jS1+l0rFixglWrVmFjY0NUVBQA06dPx8XFhcjIyDsmyRoMBmbPns327dv56KOPmD9/PiUlJfz973/n2LFjfPPNN5ibm/PZZ5+h1+uJj49Ho9Hg5ubG66+/zsKFC1vlwaTT6di+fTsLFy6kqqrqlu+VSiVbtmwhPDz8gffVHA+74kFyTiTul4dpu8XFxa3S/uN2NDY2cvLkSeLi4gBwdXWluroavV4vTvQsLCwwGAxixNYkCW/idpFck1K0yVExJfAHBgYyfPjwO57L6upqdu3aRV1dHZ07dxaVUxsbG6mpqWH06NF31BKprq5m69at+Pv7c+XKFYYOHUpJSQkJCQlYWlri4eGBwWCgoKBArH6ysbGhqqoKZ2dnxowZg6+v732d05spLy/nl19+obS0FKVSiUajwd7eHr1ej5OTEzk5OfTq1YtJkya1ikjeb9m9ezdTpkx5vJ2TP+rG/OSTT1i6dOnvvh+4Uenz/PPP884779CzZ8/bbmeKcixatIjevXsTHR1NamoqM2bMoF+/fmzatOmODkpKSgqjR4+mqqoKX19fKioq6NChA5988glpaWn885//FBNGAQYOHMiGDRvo27dvq6xB5uXlsWzZMnbu3NlEoA5uRJMmTJjAG2+8wahRo3633B7JOZFoqzzutqvVavnqq6/ESUuHDh3QarVNEmdvjpiYIiim11lzzom5uTl2dnYIgoC5uTnt2rXD2tqa7Oxs1Go1tra2DBo0iL59+942gl1dXc327dsZNGgQycnJBAYGkpubK1YMjRo16o7LIceOHSMhIQGNRoONjQ2Ojo5otVosLCzQarVcv34dmUxGnz59qKio4Nq1a4SFhTFo0KD77kdkQq/Xc/78eY4fP45MJhO7RCcmJooVTKbecBMmTPhdViSg5c7Jg8Xk/yQ8iHjOvVJTU8NXX33FqFGjeOutt24bsZHL5cydO5eFCxeSnJzMu+++y+DBg9m9eze5ubmsWLGi2bJkE35+fixfvhyZTEZmZiahoaGsX7+eXbt2sXjxYtExsbS05K9//Sv79+8nODj4gR2F69evs379ekaNGiUq51paWmJmZoa7uzszZswgKiqKH374gdGjR/8hSccSEhKPFqbSYrgR5SgqKkKr1eLk5CRGPExLNAaDQXQ4LCwssLCwED83GAxYWVnh6uoq5sbV19dTXV1Nbm4u6enpGAwG2rdvT/v27Tly5Aj//ve/OX36tFh8cDNKpZJp06Zx4cIFgoKCOHXqFKGhoaJg2969e6mpqbntuEJDQ3F3d0cmk+Hj4yP2szHJ7Xfq1Am
ZTEZBQQHV1dX85S9/YcSIEQ/kmAiCQGZmJps3byY6Ohpzc3Pkcjk5OTnExcWhVquxtramV69evPzyyzz77LO/m2NyL7Rcku9PTHx8/O/223K5nMDAQBwdHUlNTRWTuoqLi1m/fj2RkZG88847zJ079xaDkcvlvP/++yQmJrJr1y4GDhzIW2+9xc8//8wrr7zCN998wxtvvHHbfb/66qu4ubmJs6E5c+aQnJws3ui9e/dmwYIFvPTSS/ek3tgcjY2N/Prrr6xYsYKEhASMRiMdOnRg1qxZPP3006SlpTFy5Eg6dOjwu4QSJSQk2g4qlUpMaDU5InV1dbi5uWFvb09dXR3t2rXDzMyMqqoqamtrmyzrmJmZYWtrS7t27dBoNNTW1lJSUiI+20wRFqPRiF6vR6PRoNfrcXBwoLa2lujoaFJSUggPD79FXl+pVDJs2DBiY2Pp168f0dHRPP/886SmpnLq1CnOnj3L0KFDm33BKxQKpkyZwt69e9FoNMjlcvLz8+ncuTMODg4UFBTQ2NiIi4sLkyZNeuCE45KSEo4dO0ZWVhbu7u4olUqqqqqQy+X4+/tTVlaGn58fPXv2bLXk5tZCck5aQF1d3e/yuwqFglmzZvHZZ5+hUCioqKjg+PHjfPLJJ6JIWF5eHu+99x5nzpzhH//4xy3qi87Oznz22WeEh4fz2WefERoaSr9+/dizZw85OTli6LM5LCwseOqpp9i8eTNLly6loqIChULB0KFDiYiIYMqUKa2SW5KVlcWSJUvYu3cvWq2WHj16MH36dF566SWxAeKAAQMeeD8SEhKPByan4XafW1hYYGNjg52dHb169SIhIQFra2tkMhkuLi6cPXsWW1tbSktLxQjIzQmxHTp0YMiQISiVSoqKikhPTycvLw9HR0cGDRrEmTNnKCoqYvPmzYwfP54+ffo0yQPs1q0bxcXFFBcXI5fLOXz4MBMmTMDBwYErV66g1+tvG32wsbFh9OjR7Nu3j+rqalQqFUVFRRiNRtzd3Rk9ejS+vr4PNEkTBIGzZ89y9OhRampqcHV1paioiPbt2zNu3DgCAwPvWa32j+ZP6ZwUFRVRWFjIoEGDWrRs0Fx470Hw8vLCz8+PadOmMWXKFLHm3cHBgTFjxhASEsLYsWPJzs4G/q/LZWpqKqtXr2bMmDFNbpSAgAD+93//l1dffZWlS5fy888/4+zsfFfhnPLycpYuXcqWLVuws7PjtddeIyIigqFDh7ZKF01BEDh58iRz584lJSUFe3t7Fi5cyLx58x5I5VVCQqLtkZSUxNWrVxk+fPhdlymaq7Yx5WeYIiomwUqj0dikUic7OxszMzPKy8tvmZyZm5vj5eXF2LFjcXFxQaVS0b59e4KCgvDy8uLIkSMkJCTg4OCARqNBrVbz888/k5mZyejRo5vkSDzxxBPs2LGDLl26kJqaSlFREb6+vndNWs3KyiImJobQ0FCysrLIy8vDx8dHlKR40MixVqvl0KFDnD9/HrVaLQqITpw4EX9//0diyaYl/OmcE5N8uUwmIz4+/q6Ko/n5+Zw4caLV9m9lZcWXX34p5lM0NDSIYmQJCQlUVlayePFi3n77bRYuXNhk9pCcnMzkyZOZPXs2f/vb35pEUSIiIjh+/DibN2/m//2//8dbb711R8ervLycl156iejoaAICAvjqq6/o37//A5cGm9BqtezatYu33nqLuro6hg4dyvLlyxkyZIi0bCMh8Sfj3LlzJCYmYjQaKSsru2uD1bS0NLFxnbm5OR4eHjQ2NlJdXS06HKbKG1M0xLSsY+o0bGdnh8FgQK1WA+Dm5kZwcDBOTk6UlpZy7Ngx8vLyUKvVCIJA586dxQaAjY2NTZZ/UlJSKCwsZNSoUfj5+QEgk8kYO3Yse/bsoVevXsTExDBt2rQ7RiRMjomDgwMXL17E09OTOXPmtFqCcVVVFVFRUaSlpSGXy7GxsSEwMJCRI0e2mtLuH8WfqlqnoKCA8PBwkpKS8PDw4PDhw3fsjqtWq5k9ezbff/99axy2iIuLi+hYNDY2kp+f3yR51c7OjvXr13PkyBG2bt16y9+bmZkREhLCtm3bmlT0lJSU8MQTT9DQ0EBkZCT9+vVrdv/l5eXMnDmTmJgYJkyYwNdff33PTfTuhEaj4e9//ztffPEF5ubmzJ07l//5n/9p9ZvDFNG635nA417xIPH40pZs98KFCxw8eBBnZ2cUCgXe3t4MGzbstttfu3aN7du3U1dXh0wmIyQkhOzsbLFyx+ScmJwRkxNhepWZnBOlUknHjh2prKwUm7LerJdy8+TNzMwMGxsbGhoa0Ol0Yq+zTp06if144Eae39ChQxk0aJCYh3f+/HlSUlJQKpXY2dkxYsSIZieGWVlZnD59mh49ehAfH8+oUaPo3bt3q00IKysr2blzJ+Xl5RgMBlxdXXn66afp0qVLqxQWmETsVCoVgiDg4eFxX7mILa3W+dNETurr61myZAlJSUkEBQVRV1fHxYsX7+icHDhwgJ07d97xd03lV3Bj+aUl3YbLy8spLy+/47G+8cYbTJ48GblcfkvVjSAInDt3jieffJIvvviC8PBw5HI57u7uvP322yxatIj58+cTHR19i0NgipicPn2a5cuX88477zxwidrN1NTUsHz5ctatW4dCoWDNmjXMnj37vnriCIJAQUEBhYWFTRr16XQ6YmNjxXM4fvx4hg8fTkhISKssR0lISLQOV69e5ciRI+h0OsrKynBxcbmjqKXRaOTw4cPU19fj6uqKubk5586dQ6/Xi06IqXTYFIE1LeuYXvIm/ZPa2lpSUlLo1q0bnTp1wmg0UllZicFgEFtk9O7dm9TUVAoKCsTGoyZdEUEQUKlU2NjYiP3DtFothw8fpqioiHHjxqFUKvHz8+PChQt4eXkRHx+Pt7f3LRWeWVlZxMXF4ezsTFpaGpMnT6ZTp06tdp4zMzM5dOgQcKNxoZOTE5MnT76vnjj19fWkpaVRX1/fJKVBpVKRlpaGQqEQn7Pe3t4EBQXh5ubW6stFfxrn5JdffmHHjh0EBweze/duzMzM7tjaWxAEfvnll9uquyqVSgIDA5kxYwb9+/fHzMyMtLQ0Tp48SXZ2NgaDgdTUVK5fvy6GDe8FlUrF999/f0dn5+rVq0yfPp0FCxawZMkS7O3tefXVV9mxYwcJCQns3buXadOmids3Njby17/+lYKCAn744YdbclcelOrqat58801++OEH7O3t2bhxIxEREfe9jFNTU8OGDRvIyMhoNu9HEASSk5OJi4vD1taWvn37MmTIEMaPH8+AAQMkR0VC4iFiNBqJiYnB39+fCxcu8OSTT4r9Ym5HfX09hYWFoqNwc0M+ExYWFri5udGhQwfMzMwoLS2lvLxcbDVyc5TAJIuvUChwdXXF2dkZlUpFbW0teXl55OXliSXHDQ0NjB8/nvPnz4sy8pWVlVhYWIgOkek3MzIyqKio4Omnn8bLy4sRI0aI3d5PnjwplgTDjed0dHS0+DtTpkxp1SjypUuXOH78OEajkdraWrp3787YsWPvu5jhypUrlJ
WV4ezs3CSyUV9fL46hQ4cOODs7o9Pp+OWXXzAYDPTq1Yu+ffu2WhT+T+OcxMbGYmVlxZo1a+663gk3BGvq6+uRy+V4eHjg4OCAlZUV/fv3Jzg4mJEjR+Lp6dlkfdHf35+pU6cCN27MqqoqioqKxHryixcvUlpaSkFBgVi21tyFFASBwsJCsdnendBoNKxZs4a6ujqWLVuGi4sLq1ev5plnnmHhwoX4+fkREBAAQHFxMUqlkv3797foHNwLRUVFvPLKKxw9epTg4GDWrl3L0KFDHyicqFQqWb169W2/12q1ZGdnExkZybZt2zhz5gwXL14kOjqaZcuWMXHixPvet4SExIOhUqmoqqrCzs6OHj16EBISctfngUqlQiaT0dDQIOqXmJZcnJ2d6dGjB15eXnTq1KnJb9XX19PY2IjRaKSoqIjS0tImTotaraaoqAhAXN75bUS6pqaGqKgorK2tmywTKRQKunTpwpUrV0QHyJSQ+9NPPzFp0iR8fHzo2LEj9fX12NnZceLECYYPH465uTl5eXl07NiRTp06ERQU1Go5dwaDgeTkZGJiYhg6dCjnz58nICCAcePGPVAUw9/fny5dujQ7IfTz80OtVnP58mUuX76MmZkZAQEB5ObmYmVlRXJyMiNHjmyVSW+bzDmpqalBqVSK/RdawvLlyzlx4gRRUVEtnlFXVVWRnp5Ojx49sLOzw8zM7IFm4zqdDp1OR1FREXq9Hnt7+1tq6E3k5+cTHx/P7t27iY2NbdHvL1y4kGXLlmE0GnnzzTf5/vvv8fPzY9OmTfTo0UO8KVs7IfXKlStMnTqVjIwMrK2tOXDgwF37+7Q29fX1nDp1ig4dOuDt7Y1CobjrDVJbW0unTp3+kDYIJu7HdiUkfsvDtN2tW7e2aClYo9Fw8OBB1Go1w4YNw8PDo0X7ycnJISUlRdT+sLCwwNXVFYVCcU/PLkEQ0Gg0VFVV0dDQQGFhIQaD4a7P3ZycHKytrZu84O3t7XF3dyc3N1fs+6XX6zEajVhZWTF27FgaGxs5evQotra2VFdX4+/vT/fu3VGr1ajVapycnFpVVDI/P5/U1FTMzMzQ6XS4uroycODAP0y4UqvVkp+fT1JSEk5OTnh5eYk6NHfi1KlTfPbZZ3e13TbpnOTm5t6xUZOExL1QWFj4h5U2S7Yr0ZpItivRVrmb7bbJZR1Tkk9BQcEfNmv4ozHNjB7nGfbDHqMgCNTV1f2uTcx+i2S7jwcPe4yS7f4+POzr+kfwsMfYUtttk86JKVzv6Oj42BqQCQcHB2mMvyN/9ENWst3HC8l2H08k2/19aYntSo3/JCQkJCQkJB4pJOdEQkJCQkJC4pGiTTonCoWCZcuWPdY6FtIYH0/+DGOWxvh48mcYszTGR4c2Wa0jISEhISEh8fjSJiMnEhISEhISEo8vknMiISEhISEh8UghOScSEhISEhISjxSScyIhISEhISHxSCE5JxISEhISEhKPFG3SOfnXv/6Fl5cXVlZWDBw4kISEhId9SC1i5cqV9O/fH3t7e1xdXZk0aRIZGRlNtmlsbGTevHm0a9cOOzs7Jk+eTGlpaZNtCgoKmDBhAjY2Nri6uvLee++h1+v/yKG0mFWrVmFmZsY777wjfva4jfFekGy37VxXyXabItlu27muj4XtCm2MHTt2CJaWlsKmTZuE1NRUYfbs2YJSqRRKS0sf9qHdlbFjxwqbN28WUlJShKSkJOGpp54SOnfuLNTX14vbzJkzR+jUqZNw+PBh4dy5c8KgQYOEsLAw8Xu9Xi/4+fkJo0aNEi5cuCBERUUJ7du3F/72t789jCHdkYSEBMHLy0sICAgQ5s+fL37+OI3xXpBst+1cV8l2myLZbtu5ro+L7bY552TAgAHCvHnzxP83GAyCp6ensHLlyod4VPdHWVmZAAjHjx8XBEEQqqurBblcLvz444/iNunp6QIgnDp1ShAEQYiKihLMzc2FkpIScZuNGzcKDg4Ogkaj+WMHcAfq6uqE7t27CzExMcKwYcPEm+RxGuO9Itlu27iuku3eimS7beO6Pk6226aWdbRaLYmJiYwaNUr8zNzcnFGjRnHq1KmHeGT3R01NDfB/3T4TExPR6XRNxtezZ086d+4sju/UqVP4+/vj5uYmbjN27Fhqa2tJTU39A4/+zsybN48JEyY0GQs8XmO8FyTbbTvXVbLdpki223au6+Nku22qK3FFRQUGg6HJyQNwc3Pj8uXLD+mo7g+j0cg777zDE088gZ+fHwAlJSVYWlqiVCqbbOvm5kZJSYm4TXPjN333KLBjxw7Onz/P2bNnb/nucRnjvSLZbtu4rpLt3opku23juj5uttumnJPHiXnz5pGSkkJcXNzDPpRWpbCwkPnz5xMTE4OVldXDPhyJ3wHJdiXaKpLtth3a1LJO+/btsbCwuCXDuLS0FHd394d0VPfOm2++SWRkJEePHqVjx47i5+7u7mi1Wqqrq5tsf/P43N3dmx2/6buHTWJiImVlZQQHByOTyZDJZBw/fpwvv/wSmUyGm5tbmx/j/SDZ7qN/XSXbbR7Jdh/96/pY2u4fnuXygAwYMEB48803xf83GAxChw4d2kRiltFoFObNmyd4enoKmZmZt3xvSlr66aefxM8uX77cbNLSzVnyX331leDg4CA0Njb+/oO4C7W1tcKlS5ea/OvXr58wffp04dKlS4/FGO8XyXYf7esq2e7tkWz30b6uj6PttjnnZMeOHYJCoRC2bNkipKWlCa+//rqgVCqbZBg/qsydO1dwdHQUjh07Jly7dk38p1KpxG3mzJkjdO7cWThy5Ihw7tw5ITQ0VAgNDRW/N5V7jRkzRkhKShIOHjwouLi4PJIlbSZuzhoXhMdzjC1Bst22d10l272BZLtt77q2ddttc86JIAjCunXrhM6dOwuWlpbCgAEDhNOnTz/sQ2oRQLP/Nm/eLG6jVquFN954Q3BychJsbGyEZ599Vrh27VqT38nLyxPGjx8vWFtbC+3btxfeffddQafT/cGjaTm/vUkexzG2FMl229Z1lWz3/5Bst21d17Zuu2aCIAh/3CKShISEhISEhMSdaVMJsRISEhISEhKPP5JzIiEhISEhIfFIITknEhISEhISEo8UknMiISEhISEh8UghOScSEhISEhISjxSScyIhISEhISHxSCE5JxISEhISEhKPFJJzIiEhISEhIfFIITknEhISEhISEo8UknMiISEhISEh8UghOScSEhISEhISjxT/Hy7wUJLUXV9oAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import draw_tools\n", + "image = numpy.array(PIL.Image.open(io.BytesIO(recorder.image.value)))[..., :3]\n", + "# 生成风格图像\n", + "im = draw_tools.generate_style_image(image)\n", + "# 获取轮廓列表\n", + "contour_list = draw_tools.getContourList(im, pen_width = 3, min_contour_len = 30, is_show=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# 对轮廓列表进行排序\n", + "contour_list = draw_tools.sortContoursList(contour_list)\n", + "# 平滑拟合并采样轮廓\n", + "f_contour_list = draw_tools.sample_and_smooth_contours(im, contour_list, is_show=False)\n", + "# 保存轮廓点到文件中,每个轮廓占一行,x和y坐标用逗号分割,点之间用逗号分割\n", + "draw_tools.save_contour_points(f_contour_list, \"../data/contour_data.txt\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import shutil\n", + "shutil.copy( \"../data/contour_data.txt\", \"/home/robot/Work/system/bspline.txt\")\n", + "import os\n", + "currdir = os.getcwd()\n", + "os.chdir('/home/ck/')" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "#1、执行./IGH_rc.sh,启动igh\n", + "#2、执行./runIGH.sh,开启通讯\n", + "#3、执行./runrobot.sh,运行画画程序\n", + "#4、执行./stoprobot.sh,关闭画画程序\n", + "#5、执行./runrobotoig.sh,运行运动程序,可在桌面程序上运动\n", + "#6、执行./stoprobotoig.sh,关闭运动程序\n", + "#7、执行./stopIGH.sh,关闭通讯" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting EtherCAT master 1.5.2 done\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#1、执行./IGH_rc.sh,启动igh\n", + "os.system(\"./IGH_rc.sh\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Failed to reserve master: Device or resource busy\n", + "root 38530 1 1 Dec06 ? 00:04:26 ./IgHEtherCATMaster --task run --file ./eni.xml --affinity 1\n", + "root 111117 110176 0 02:13 ? 00:00:00 sh -c ps -ef | grep Master\n", + "root 111119 111117 0 02:13 ? 00:00:00 grep Master\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import time\n", + "#2、执行./runIGH.sh,开启通讯\n", + "os.system(\"./runIGH.sh\")\n", + "time.sleep(5)\n", + "os.system(\"ps -ef | grep Master\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "============start robot control======================\n", + "start run time:20231207021353\n", + "verison: 11.4.6\n", + "Author: HanBing\n", + "Email: A994055925@163.com\n", + "Data file path: /home/ck/robot_config/data/\n", + "Start system initialize!\n", + "OUT SINGLE ABSOLUTE ENCOUDER(POSITION CLOSED LOOP)\n", + "group init existing problem. 
Some fields are not found!\n", + "dof num is 6\n", + "control:6,state:6,mode:0\n", + "ac_position:6,ac_velocity:0,ac_torque:0,ac_position2:0,ac_velocity2:0,ac_sensor_torque:0,ac_mode:0,ErrorCode:0,FollowingErrorActualValue:0\n", + "position:6,velocity:0,torque:0,VelocityOffset:0,TorqueOffset:0,TorqueMaxLimit:0,TorqueMinLimit:0\n", + "/////////////////////////////////////////////////////////////////\n", + "fd_ecat_in_name: /ecat_in\n", + "fd_ecat_out_name: /ecat_out\n", + "/////////////////////////////////////////////////////////////////\n", + "state: Status Word\n", + "control: Control word\n", + "mode: \n", + "ac_position: Position actual value\n", + "ac_velocity: \n", + "ac_torque: \n", + "ac_position2: \n", + "ac_velocity2: \n", + "ac_sensor_torque: \n", + "ac_mode: \n", + "ErrorCode: \n", + "FollowingErrorActualValue: \n", + "position: Target Position\n", + "velocity: \n", + "torque: \n", + "VelocityOffset: \n", + "TorqueOffset: \n", + "TorqueMaxLimit: \n", + "TorqueMinLimit: \n", + "ec_di: \n", + "ec_do: \n", + "ec_ai: \n", + "ec_ao: \n", + "ec_li: \n", + "ec_lo: \n", + "Number of fields: 26\n", + "busyTs: 4000000 ns\n", + "/////////////////////////////////////////////////////////////////\n", + "ec_device1/robot0: \n", + "DOF: 6\n", + "0_state: 64\n", + "0_control: 64\n", + "0_mode: -999999\n", + "0_ac_position: 0\n", + "0_ac_velocity: -999999\n", + "0_ac_torque: -999999\n", + "0_ac_current: -999999\n", + "0_ac_position2: -999999\n", + "0_ac_velocity2: -999999\n", + "0_ac_sensor_torque: -999999\n", + "0_ac_mode: -999999\n", + "0_ErrorCode: -999999\n", + "0_FollowingErrorActualValue: -999999\n", + "0_position: 0\n", + "0_velocity: -999999\n", + "0_torque: -999999\n", + "0_VelocityOffset: -999999\n", + "0_TorqueOffset: -999999\n", + "0_TorqueMaxLimit: -999999\n", + "0_TorqueMinLimit: -999999\n", + "1_state: 144\n", + "1_control: 144\n", + "1_mode: -999999\n", + "1_ac_position: 80\n", + "1_ac_velocity: -999999\n", + "1_ac_torque: -999999\n", + "1_ac_current: -999999\n", + "1_ac_position2: -999999\n", + "1_ac_velocity2: -999999\n", + "1_ac_sensor_torque: -999999\n", + "1_ac_mode: -999999\n", + "1_ErrorCode: -999999\n", + "1_FollowingErrorActualValue: -999999\n", + "1_position: 80\n", + "1_velocity: -999999\n", + "1_torque: -999999\n", + "1_VelocityOffset: -999999\n", + "1_TorqueOffset: -999999\n", + "1_TorqueMaxLimit: -999999\n", + "1_TorqueMinLimit: -999999\n", + "2_state: 224\n", + "2_control: 224\n", + "2_mode: -999999\n", + "2_ac_position: 160\n", + "2_ac_velocity: -999999\n", + "2_ac_torque: -999999\n", + "2_ac_current: -999999\n", + "2_ac_position2: -999999\n", + "2_ac_velocity2: -999999\n", + "2_ac_sensor_torque: -999999\n", + "2_ac_mode: -999999\n", + "2_ErrorCode: -999999\n", + "2_FollowingErrorActualValue: -999999\n", + "2_position: 160\n", + "2_velocity: -999999\n", + "2_torque: -999999\n", + "2_VelocityOffset: -999999\n", + "2_TorqueOffset: -999999\n", + "2_TorqueMaxLimit: -999999\n", + "2_TorqueMinLimit: -999999\n", + "3_state: 304\n", + "3_control: 304\n", + "3_mode: -999999\n", + "3_ac_position: 240\n", + "3_ac_velocity: -999999\n", + "3_ac_torque: -999999\n", + "3_ac_current: -999999\n", + "3_ac_position2: -999999\n", + "3_ac_velocity2: -999999\n", + "3_ac_sensor_torque: -999999\n", + "3_ac_mode: -999999\n", + "3_ErrorCode: -999999\n", + "3_FollowingErrorActualValue: -999999\n", + "3_position: 240\n", + "3_velocity: -999999\n", + "3_torque: -999999\n", + "3_VelocityOffset: -999999\n", + "3_TorqueOffset: -999999\n", + "3_TorqueMaxLimit: -999999\n", + "3_TorqueMinLimit: 
-999999\n", + "4_state: 384\n", + "4_control: 384\n", + "4_mode: -999999\n", + "4_ac_position: 320\n", + "4_ac_velocity: -999999\n", + "4_ac_torque: -999999\n", + "4_ac_current: -999999\n", + "4_ac_position2: -999999\n", + "4_ac_velocity2: -999999\n", + "4_ac_sensor_torque: -999999\n", + "4_ac_mode: -999999\n", + "4_ErrorCode: -999999\n", + "4_FollowingErrorActualValue: -999999\n", + "4_position: 320\n", + "4_velocity: -999999\n", + "4_torque: -999999\n", + "4_VelocityOffset: -999999\n", + "4_TorqueOffset: -999999\n", + "4_TorqueMaxLimit: -999999\n", + "4_TorqueMinLimit: -999999\n", + "5_state: 464\n", + "5_control: 464\n", + "5_mode: -999999\n", + "5_ac_position: 400\n", + "5_ac_velocity: -999999\n", + "5_ac_torque: -999999\n", + "5_ac_current: -999999\n", + "5_ac_position2: -999999\n", + "5_ac_velocity2: -999999\n", + "5_ac_sensor_torque: -999999\n", + "5_ac_mode: -999999\n", + "5_ErrorCode: -999999\n", + "5_FollowingErrorActualValue: -999999\n", + "5_position: 400\n", + "5_velocity: -999999\n", + "5_torque: -999999\n", + "5_VelocityOffset: -999999\n", + "5_TorqueOffset: -999999\n", + "5_TorqueMaxLimit: -999999\n", + "5_TorqueMinLimit: -999999\n", + "/////////////////////////////////////////////////////////////////\n", + "/////////////////////////////////////////////////////////////////\n", + "/////////////////////////////////////////////////////////////////" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/dev/mem: Bad address\n", + "/dev/mem: Bad address\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Device is running\n", + "InitRobot\n", + "Hardware Match\n", + "printf_hardware_information: can't get cpuid\n", + "Hardware information: \n", + " cpuid:0000000000000000\n", + " mac:128a5e84678d\n", + "init_robot_teach\n", + "set_robot_index\n", + "get_robot_num\n", + "set_addition_index\n", + "get_addition_num\n", + "get_robot_num\n", + "initialize finish\n", + "start Draw\n", + "power is finish!\n", + "150\n", + "1\n", + "2\n", + "3\n", + "4\n", + "5\n", + "6\n", + "7\n", + "8\n", + "9\n", + "10\n", + "11\n", + "12\n", + "13\n", + "14\n", + "15\n", + "16\n", + "17\n", + "18\n", + "19\n", + "20\n", + "21\n", + "22\n", + "23\n", + "24\n", + "25\n", + "26\n", + "27\n", + "28\n", + "29\n", + "30\n", + "31\n", + "32\n", + "33\n", + "34\n", + "35\n", + "36\n", + "37\n", + "38\n", + "39\n", + "40\n", + "41\n", + "42\n", + "43\n", + "44\n", + "45\n", + "46\n", + "47\n", + "48\n", + "49\n", + "50\n", + "51\n", + "52\n", + "53\n", + "54\n", + "55\n", + "56\n", + "57\n", + "58\n", + "59\n", + "60\n", + "61\n", + "62\n", + "63\n", + "64\n", + "65\n", + "66\n", + "67\n", + "68\n", + "69\n", + "70\n", + "71\n", + "72\n", + "73\n", + "74\n", + "75\n", + "76\n", + "77\n", + "78\n", + "79\n", + "80\n", + "81\n", + "82\n", + "83\n", + "84\n", + "85\n", + "86\n", + "87\n", + "88\n", + "89\n", + "90\n", + "91\n", + "92\n", + "93\n", + "94\n", + "95\n", + "96\n", + "97\n", + "98\n", + "99\n", + "100\n", + "101\n", + "102\n", + "103\n", + "104\n", + "105\n", + "106\n", + "107\n", + "108\n", + "109\n", + "110\n", + "111\n", + "112\n", + "113\n", + "114\n", + "115\n", + "116\n", + "117\n", + "118\n", + "119\n", + "120\n", + "121\n", + "122\n", + "123\n", + "124\n", + "125\n", + "126\n", + "127\n", + "128\n", + "129\n", + "130\n", + "131\n", + "132\n", + "133\n", + "134\n", + "135\n", + "136\n", + "137\n", + "138\n", + "139\n", + "140\n", + "141\n", + "142\n", + "143\n", + "144\n", + "145\n", + "146\n", + "147\n", + "148\n", + "149\n", + "150\n" + 
]
+    }
+   ],
+   "source": [
+    "# Run the drawing program\n",
+    "# 3. Run ./runrobot.sh to start the drawing program\n",
+    "# 4. Run ./stoprobot.sh to stop the drawing program\n",
+    "# 5. Run ./runrobotoig.sh to start the motion program; the robot can then be moved from the desktop application\n",
+    "# 6. Run ./stoprobotoig.sh to stop the motion program\n",
+    "os.system(\"./runrobot.sh\")\n",
+    "# os.system(\"./stoprobot.sh\")\n",
+    "# os.system(\"./runrobotoig.sh\")\n",
+    "# os.system(\"./stoprobotoig.sh\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 4. Run ./stoprobot.sh to stop the drawing program\n",
+    "os.system(\"./stoprobot.sh\")\n",
+    "os.system(\"./stoprobot.sh\")\n",
+    "os.system(\"./stopIGH.sh\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/robot_painting/qmupd_vs/test.py b/robot_painting/qmupd_vs/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..a93a3ffa6ae0907b3c6ab038c84f713033dc6cbe
--- /dev/null
+++ b/robot_painting/qmupd_vs/test.py
@@ -0,0 +1,70 @@
+"""General-purpose test script for image-to-image translation.
+
+Once you have trained your model with train.py, you can use this script to test the model.
+It will load a saved model from --checkpoints_dir and save the results to --results_dir.
+
+It first creates the model and dataset given the options. It will hard-code some parameters.
+It then runs inference for --num_test images and saves the results to an HTML file.
+
+Example (You need to train models first or download pre-trained models from our website):
+    Test a CycleGAN model (both sides):
+        python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+
+    Test a CycleGAN model (one side only):
+        python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
+
+    The option '--model test' is used for generating CycleGAN results only for one side.
+    This option will automatically set '--dataset_mode single', which only loads the images from one set.
+    On the contrary, using '--model cycle_gan' requires loading and generating results in both directions,
+    which is sometimes unnecessary. The results will be saved at ./results/.
+    Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
+
+    Test a pix2pix model:
+        python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+
+See options/base_options.py and options/test_options.py for more test options.
+See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
+See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
+"""
+import os
+from options.test_options import TestOptions
+from data import create_dataset
+from models import create_model
+from util.visualizer import save_images
+from util import html
+
+
+if __name__ == '__main__':
+    opt = TestOptions().parse()  # get test options
+    # hard-code some parameters for test
+    opt.num_threads = 0   # test code only supports num_threads = 1
+    opt.batch_size = 1    # test code only supports batch_size = 1
+    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
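+    # Together with the two flags below, the hard-coded options above pin this script to a
+    # deterministic pass: single-threaded loading, batch size 1, no shuffling, no flipping, no visdom.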
+    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
+    opt.display_id = -1   # no visdom display; the test code saves the results to an HTML file.
+    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
+    model = create_model(opt)      # create a model given opt.model and other options
+    model.setup(opt)               # regular setup: load and print networks; create schedulers
+    # create a website
+    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch))  # define the website directory
+    #webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
+    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch), refresh=0, folder=opt.imagefolder)
+    # test with eval mode. This only affects layers like batchnorm and dropout.
+    # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
+    # For [CycleGAN]: it should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
+    if opt.eval:
+        model.eval()
+    for name in model.model_names:
+        if isinstance(name, str):
+            print(getattr(model, 'net' + name).training)
+    for i, data in enumerate(dataset):
+        if i >= opt.num_test:  # only apply our model to opt.num_test images.
+            break
+        model.set_input(data)  # unpack data from data loader
+        model.test()           # run inference
+        visuals = model.get_current_visuals()  # get image results
+        img_path = model.get_image_paths()     # get image paths
+        if i % 5 == 0:  # save images to an HTML file
+            print('processing (%04d)-th image... %s' % (i, img_path))
+        save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
+    webpage.save()  # save the HTML
diff --git a/robot_painting/qmupd_vs/test_photograph_to_line.py b/robot_painting/qmupd_vs/test_photograph_to_line.py
new file mode 100644
index 0000000000000000000000000000000000000000..751d243d3565095e246a9092a94cd026a0de4d8e
--- /dev/null
+++ b/robot_painting/qmupd_vs/test_photograph_to_line.py
@@ -0,0 +1,260 @@
+import numpy as np
+import os
+import tensorflow as tf
+from six.moves import range
+from PIL import Image
+import argparse
+
+import hyper_parameters as hparams
+from model_common_test import DiffPastingV3, VirtualSketchingModel
+from utils import reset_graph, load_checkpoint, update_hyperparams, draw, \
+    save_seq_data, image_pasting_v3_testing, draw_strokes
+from dataset_utils import load_dataset_testing

+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+
+def sample(sess, model, input_photos, init_cursor, image_size, init_len, seq_len, state_dependent,
+           pasting_func):
+    """Samples a sequence from a pre-trained model."""
+    select_times = 1
+    cursor_pos = np.squeeze(init_cursor, axis=0)  # (select_times, 1, 2)
+    curr_canvas = np.zeros(dtype=np.float32,
+                           shape=(select_times, image_size, image_size))  # [0.0-BG, 1.0-stroke]
+
+    initial_state = sess.run(model.initial_state)
+    prev_state = initial_state
+    prev_width = np.stack([model.hps.min_width for _ in range(select_times)], axis=0)
+    prev_scaling = np.ones((select_times), dtype=np.float32)  # (N)
+    prev_window_size = np.ones((select_times), dtype=np.float32) * model.hps.raster_size  # (N)
+
+    params_list = [[] for _ in range(select_times)]
+    state_raw_list = [[] for _ in range(select_times)]
+    state_soft_list = [[] for _ in range(select_times)]
+    window_size_list = [[] for _ in range(select_times)]
+
+    input_photos_tiles = np.tile(input_photos, 
(select_times, 1, 1, 1)) + + for i in range(seq_len): + if not state_dependent and i % init_len == 0: + prev_state = initial_state + + curr_window_size = prev_scaling * prev_window_size # (N) + curr_window_size = np.maximum(curr_window_size, model.hps.min_window_size) + curr_window_size = np.minimum(curr_window_size, image_size) + + feed = { + model.initial_state: prev_state, + model.input_photo: input_photos_tiles, + model.curr_canvas_hard: curr_canvas.copy(), + model.cursor_position: cursor_pos, + model.image_size: image_size, + model.init_width: prev_width, + model.init_scaling: prev_scaling, + model.init_window_size: prev_window_size, + } + + o_other_params_list, o_pen_list, o_pred_params_list, next_state_list = \ + sess.run([model.other_params, model.pen_ras, model.pred_params, model.final_state], feed_dict=feed) + # o_other_params: (N, 6), o_pen: (N, 2), pred_params: (N, 1, 7), next_state: (N, 1024) + # o_other_params: [tanh*2, sigmoid*2, tanh*2, sigmoid*2] + + idx_eos_list = np.argmax(o_pen_list, axis=1) # (N) + + for output_i in range(idx_eos_list.shape[0]): + idx_eos = idx_eos_list[output_i] + + eos = [0, 0] + eos[idx_eos] = 1 + + other_params = o_other_params_list[output_i].tolist() # (6) + params_list[output_i].append([eos[1]] + other_params) + state_raw_list[output_i].append(o_pen_list[output_i][1]) + state_soft_list[output_i].append(o_pred_params_list[output_i, 0, 0]) + window_size_list[output_i].append(curr_window_size[output_i]) + + # draw the stroke and add to the canvas + x1y1, x2y2, width2 = o_other_params_list[output_i, 0:2], o_other_params_list[output_i, 2:4], \ + o_other_params_list[output_i, 4] + x0y0 = np.zeros_like(x2y2) # (2), [-1.0, 1.0] + x0y0 = np.divide(np.add(x0y0, 1.0), 2.0) # (2), [0.0, 1.0] + x2y2 = np.divide(np.add(x2y2, 1.0), 2.0) # (2), [0.0, 1.0] + widths = np.stack([prev_width[output_i], width2], axis=0) # (2) + o_other_params_proc = np.concatenate([x0y0, x1y1, x2y2, widths], axis=-1).tolist() # (8) + + if idx_eos == 0: + f = o_other_params_proc + [1.0, 1.0] + pred_stroke_img = draw(f) # (raster_size, raster_size), [0.0-stroke, 1.0-BG] + pred_stroke_img_large = image_pasting_v3_testing(1.0 - pred_stroke_img, cursor_pos[output_i, 0], + image_size, + curr_window_size[output_i], + pasting_func, sess) # [0.0-BG, 1.0-stroke] + curr_canvas[output_i] += pred_stroke_img_large # [0.0-BG, 1.0-stroke] + curr_canvas = np.clip(curr_canvas, 0.0, 1.0) + + next_width = o_other_params_list[:, 4] # (N) + next_scaling = o_other_params_list[:, 5] + next_window_size = next_scaling * curr_window_size # (N) + next_window_size = np.maximum(next_window_size, model.hps.min_window_size) + next_window_size = np.minimum(next_window_size, image_size) + + prev_state = next_state_list + prev_width = next_width * curr_window_size / next_window_size # (N,) + prev_scaling = next_scaling # (N) + prev_window_size = curr_window_size + + # update cursor_pos based on hps.cursor_type + new_cursor_offsets = o_other_params_list[:, 2:4] * (np.expand_dims(curr_window_size, axis=-1) / 2.0) # (N, 2), patch-level + new_cursor_offset_next = new_cursor_offsets + + # important!!! 
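+        # Descriptive note: the concatenation below swaps the two components of the
+        # predicted offset, presumably converting the network's (x, y) output into the
+        # transposed (y, x) ordering used for the cursor position update; this appears
+        # to be the intent behind the "important" marker above.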
+ new_cursor_offset_next = np.concatenate([new_cursor_offset_next[:, 1:2], new_cursor_offset_next[:, 0:1]], axis=-1) + + cursor_pos_large = cursor_pos * float(image_size) + stroke_position_next = cursor_pos_large[:, 0, :] + new_cursor_offset_next # (N, 2), large-level + + if model.hps.cursor_type == 'next': + cursor_pos_large = stroke_position_next # (N, 2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), float(image_size - 1)) # (N, 2), large-level + cursor_pos_large = np.expand_dims(cursor_pos_large, axis=1) # (N, 1, 2) + cursor_pos = cursor_pos_large / float(image_size) + + return params_list, state_raw_list, state_soft_list, curr_canvas, window_size_list + + +def main_testing(test_image_base_dir, test_dataset, test_image_name, + sampling_base_dir, model_base_dir, model_name, + sampling_num, + draw_seq=False, draw_order=False, + state_dependent=True, longer_infer_len=-1): + model_params_default = hparams.get_default_hparams_normal() + model_params = update_hyperparams(model_params_default, model_base_dir, model_name, infer_dataset=test_dataset) + + [test_set, eval_hps_model, sample_hps_model] = \ + load_dataset_testing(test_image_base_dir, test_dataset, test_image_name, model_params) + + test_image_raw_name = test_image_name[:test_image_name.find('.')] + model_dir = os.path.join(model_base_dir, model_name) + + reset_graph() + sampling_model = VirtualSketchingModel(sample_hps_model) + + # differentiable pasting graph + paste_v3_func = DiffPastingV3(sample_hps_model.raster_size) + + tfconfig = tf.ConfigProto() + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + # loads the weights from checkpoint into our model + snapshot_step = load_checkpoint(sess, model_dir, gen_model_pretrain=True) + print('snapshot_step', snapshot_step) + sampling_dir = os.path.join(sampling_base_dir, test_dataset + '__' + model_name) + os.makedirs(sampling_dir, exist_ok=True) + + if longer_infer_len == -1: + tmp_max_len = eval_hps_model.max_seq_len + else: + tmp_max_len = longer_infer_len + + for sampling_i in range(sampling_num): + input_photos, init_cursors, test_image_size = test_set.get_test_image() + # input_photos: (1, image_size, image_size, 3), [0-stroke, 1-BG] + # init_cursors: (N, 1, 2), in size [0.0, 1.0) + + print() + print(test_image_name, ', image_size:', test_image_size, ', sampling_i:', sampling_i) + print('Processing ...') + + if init_cursors.ndim == 3: + init_cursors = np.expand_dims(init_cursors, axis=0) + + input_photos = input_photos[0:1, :, :, :] + + ori_img = (input_photos.copy()[0] * 255.0).astype(np.uint8) + ori_img_png = Image.fromarray(ori_img, 'RGB') + ori_img_png.save(os.path.join(sampling_dir, test_image_raw_name + '_input.png'), 'PNG') + + # decoding for sampling + strokes_raw_out_list, states_raw_out_list, states_soft_out_list, pred_imgs_out, window_size_out_list = sample( + sess, sampling_model, input_photos, init_cursors, test_image_size, + eval_hps_model.max_seq_len, tmp_max_len, state_dependent, paste_v3_func) + # pred_imgs_out: (N, H, W), [0.0-BG, 1.0-stroke] + + output_i = 0 + strokes_raw_out = np.stack(strokes_raw_out_list[output_i], axis=0) + states_raw_out = states_raw_out_list[output_i] + states_soft_out = states_soft_out_list[output_i] + window_size_out = window_size_out_list[output_i] + + round_new_lengths = [tmp_max_len] + multi_cursors = [init_cursors[0, output_i, 0, :]] + + print('strokes_raw_out', 
strokes_raw_out.shape) + + clean_states_soft_out = np.array(states_soft_out) # (N) + + flag_list = strokes_raw_out[:, 0].astype(np.int32) # (N) + drawing_len = len(flag_list) - np.sum(flag_list) + assert drawing_len >= 0 + + # print(' flag raw\t soft\t x1\t\t y1\t\t x2\t\t y2\t\t r2\t\t s2') + for i in range(strokes_raw_out.shape[0]): + flag, x1, y1, x2, y2, r2, s2 = strokes_raw_out[i] + win_size = window_size_out[i] + out_format = '#%d: %d | %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' + out_values = (i, flag, states_raw_out[i], clean_states_soft_out[i], x1, y1, x2, y2, r2, s2) + out_log = out_format % out_values + # print(out_log) + + print('Saving results ...') + save_seq_data(sampling_dir, test_image_raw_name + '_' + str(sampling_i), + strokes_raw_out, init_cursors[0, output_i, 0, :], + test_image_size, tmp_max_len, eval_hps_model.min_width) + + draw_strokes(strokes_raw_out, sampling_dir, test_image_raw_name + '_' + str(sampling_i) + '_pred.png', + ori_img, test_image_size, + multi_cursors, round_new_lengths, eval_hps_model.min_width, eval_hps_model.cursor_type, + sample_hps_model.raster_size, sample_hps_model.min_window_size, + sess, + pasting_func=paste_v3_func, + save_seq=draw_seq, draw_order=draw_order) + + +def main(model_name, test_image_name, sampling_num): + test_dataset = 'faces' + test_image_base_dir = 'sample_inputs' + + sampling_base_dir = 'outputs/sampling' + model_base_dir = 'outputs/snapshot' + + state_dependent = False + longer_infer_len = 100 + + draw_seq = False + draw_color_order = True + + # set numpy output to something sensible + np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True) + + main_testing(test_image_base_dir, test_dataset, test_image_name, + sampling_base_dir, model_base_dir, model_name, sampling_num, + draw_seq=draw_seq, draw_order=draw_color_order, + state_dependent=state_dependent, longer_infer_len=longer_infer_len) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--input', '-i', type=str, default='', help="The test image name.") + parser.add_argument('--model', '-m', type=str, default='pretrain_faces', help="The trained model.") + parser.add_argument('--sample', '-s', type=int, default=1, help="The number of outputs.") + args = parser.parse_args() + + assert args.input != '' + assert args.sample > 0 + + main(args.model, args.input, args.sample) diff --git a/robot_painting/qmupd_vs/test_rough_sketch_simplification.py b/robot_painting/qmupd_vs/test_rough_sketch_simplification.py new file mode 100644 index 0000000000000000000000000000000000000000..ebe7f2ebcecfd2a207a8d41b948fd5c6bb62709e --- /dev/null +++ b/robot_painting/qmupd_vs/test_rough_sketch_simplification.py @@ -0,0 +1,361 @@ +import numpy as np +import os +import tensorflow as tf +from six.moves import range +from PIL import Image +import argparse + +import hyper_parameters as hparams +from model_common_test import DiffPastingV3, VirtualSketchingModel +from utils import reset_graph, load_checkpoint, update_hyperparams, draw, \ + save_seq_data, image_pasting_v3_testing, draw_strokes +from dataset_utils import load_dataset_testing + +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + + +def move_cursor_to_undrawn(current_pos_list, input_image_, patch_size, + move_min_dist, move_max_dist, trial_times=20): + """ + :param current_pos_list: (select_times, 1, 2), [0.0, 1.0) + :param input_image_: (1, image_size, image_size, 3), [0-stroke, 1-BG] + :return: new_cursor_pos: (select_times, 1, 2), [0.0, 1.0) + """ + + def crop_patch(image, 
center, image_size, crop_size): + x0 = center[0] - crop_size // 2 + x1 = x0 + crop_size + y0 = center[1] - crop_size // 2 + y1 = y0 + crop_size + x0 = max(0, min(x0, image_size)) + y0 = max(0, min(y0, image_size)) + x1 = max(0, min(x1, image_size)) + y1 = max(0, min(y1, image_size)) + patch = image[y0:y1, x0:x1] + return patch + + def isvalid_cursor(input_img, cursor, raster_size, image_size): + # input_img: (image_size, image_size, 3), [0.0-BG, 1.0-stroke] + cursor_large = cursor * float(image_size) + cursor_large = np.round(cursor_large).astype(np.int32) + input_crop_patch = crop_patch(input_img, cursor_large, image_size, raster_size) + if np.sum(input_crop_patch) > 0.0: + return True + else: + return False + + def randomly_move_cursor(cursor_position, img_size, min_dist_p, max_dist_p): + # cursor_position: (2), [0.0, 1.0) + cursor_pos_large = cursor_position * img_size + min_dist = int(min_dist_p / 2.0 * img_size) + max_dist = int(max_dist_p / 2.0 * img_size) + rand_cursor_offset = np.random.randint(min_dist, max_dist, size=cursor_pos_large.shape) + rand_cursor_offset_sign = np.random.randint(0, 1 + 1, size=cursor_pos_large.shape) + rand_cursor_offset_sign[rand_cursor_offset_sign == 0] = -1 + rand_cursor_offset = rand_cursor_offset * rand_cursor_offset_sign + + new_cursor_pos_large = cursor_pos_large + rand_cursor_offset + new_cursor_pos_large = np.minimum(np.maximum(new_cursor_pos_large, 0), img_size - 1) # (2), large-level + new_cursor_pos = new_cursor_pos_large.astype(np.float32) / float(img_size) + return new_cursor_pos + + input_image = 1.0 - input_image_[0] # (image_size, image_size, 3), [0-BG, 1-stroke] + img_size = input_image.shape[0] + + new_cursor_pos = [] + for cursor_i in range(current_pos_list.shape[0]): + curr_cursor = current_pos_list[cursor_i][0] + + for trial_i in range(trial_times): + new_cursor = randomly_move_cursor(curr_cursor, img_size, move_min_dist, move_max_dist) # (2), [0.0, 1.0) + + if isvalid_cursor(input_image, new_cursor, patch_size, img_size) or trial_i == trial_times - 1: + new_cursor_pos.append(new_cursor) + break + + assert len(new_cursor_pos) == current_pos_list.shape[0] + new_cursor_pos = np.expand_dims(np.stack(new_cursor_pos, axis=0), axis=1) # (select_times, 1, 2), [0.0, 1.0) + return new_cursor_pos + + +def sample(sess, model, input_photos, init_cursor, image_size, init_len, seq_lens, + state_dependent, pasting_func, round_stop_state_num, + min_dist_p, max_dist_p): + """Samples a sequence from a pre-trained model.""" + select_times = 1 + curr_canvas = np.zeros(dtype=np.float32, + shape=(select_times, image_size, image_size)) # [0.0-BG, 1.0-stroke] + + initial_state = sess.run(model.initial_state) + + params_list = [[] for _ in range(select_times)] + state_raw_list = [[] for _ in range(select_times)] + state_soft_list = [[] for _ in range(select_times)] + window_size_list = [[] for _ in range(select_times)] + + round_cursor_list = [] + round_length_real_list = [] + + input_photos_tiles = np.tile(input_photos, (select_times, 1, 1, 1)) + + for cursor_i, seq_len in enumerate(seq_lens): + if cursor_i == 0: + cursor_pos = np.squeeze(init_cursor, axis=0) # (select_times, 1, 2) + else: + cursor_pos = move_cursor_to_undrawn(cursor_pos, input_photos, model.hps.raster_size, + min_dist_p, max_dist_p) # (select_times, 1, 2) + round_cursor_list.append(cursor_pos) + + prev_state = initial_state + prev_width = np.stack([model.hps.min_width for _ in range(select_times)], axis=0) + prev_scaling = np.ones((select_times), dtype=np.float32) # (N) + prev_window_size = 
np.ones((select_times), dtype=np.float32) * model.hps.raster_size # (N) + + continuous_one_state_num = 0 + + for i in range(seq_len): + if not state_dependent and i % init_len == 0: + prev_state = initial_state + + curr_window_size = prev_scaling * prev_window_size # (N) + curr_window_size = np.maximum(curr_window_size, model.hps.min_window_size) + curr_window_size = np.minimum(curr_window_size, image_size) + + feed = { + model.initial_state: prev_state, + model.input_photo: input_photos_tiles, + model.curr_canvas_hard: curr_canvas.copy(), + model.cursor_position: cursor_pos, + model.image_size: image_size, + model.init_width: prev_width, + model.init_scaling: prev_scaling, + model.init_window_size: prev_window_size, + } + + o_other_params_list, o_pen_list, o_pred_params_list, next_state_list = \ + sess.run([model.other_params, model.pen_ras, model.pred_params, model.final_state], feed_dict=feed) + # o_other_params: (N, 6), o_pen: (N, 2), pred_params: (N, 1, 7), next_state: (N, 1024) + # o_other_params: [tanh*2, sigmoid*2, tanh*2, sigmoid*2] + + idx_eos_list = np.argmax(o_pen_list, axis=1) # (N) + + output_i = 0 + idx_eos = idx_eos_list[output_i] + + eos = [0, 0] + eos[idx_eos] = 1 + + other_params = o_other_params_list[output_i].tolist() # (6) + params_list[output_i].append([eos[1]] + other_params) + state_raw_list[output_i].append(o_pen_list[output_i][1]) + state_soft_list[output_i].append(o_pred_params_list[output_i, 0, 0]) + window_size_list[output_i].append(curr_window_size[output_i]) + + # draw the stroke and add to the canvas + x1y1, x2y2, width2 = o_other_params_list[output_i, 0:2], o_other_params_list[output_i, 2:4], \ + o_other_params_list[output_i, 4] + x0y0 = np.zeros_like(x2y2) # (2), [-1.0, 1.0] + x0y0 = np.divide(np.add(x0y0, 1.0), 2.0) # (2), [0.0, 1.0] + x2y2 = np.divide(np.add(x2y2, 1.0), 2.0) # (2), [0.0, 1.0] + widths = np.stack([prev_width[output_i], width2], axis=0) # (2) + o_other_params_proc = np.concatenate([x0y0, x1y1, x2y2, widths], axis=-1).tolist() # (8) + + if idx_eos == 0: + f = o_other_params_proc + [1.0, 1.0] + pred_stroke_img = draw(f) # (raster_size, raster_size), [0.0-stroke, 1.0-BG] + pred_stroke_img_large = image_pasting_v3_testing(1.0 - pred_stroke_img, + cursor_pos[output_i, 0], + image_size, + curr_window_size[output_i], + pasting_func, sess) # [0.0-BG, 1.0-stroke] + curr_canvas[output_i] += pred_stroke_img_large # [0.0-BG, 1.0-stroke] + + continuous_one_state_num = 0 + else: + continuous_one_state_num += 1 + + curr_canvas = np.clip(curr_canvas, 0.0, 1.0) + + next_width = o_other_params_list[:, 4] # (N) + next_scaling = o_other_params_list[:, 5] + next_window_size = next_scaling * curr_window_size # (N) + next_window_size = np.maximum(next_window_size, model.hps.min_window_size) + next_window_size = np.minimum(next_window_size, image_size) + + prev_state = next_state_list + prev_width = next_width * curr_window_size / next_window_size # (N,) + prev_scaling = next_scaling # (N) + prev_window_size = curr_window_size + + # update cursor_pos based on hps.cursor_type + new_cursor_offsets = o_other_params_list[:, 2:4] * ( + np.expand_dims(curr_window_size, axis=-1) / 2.0) # (N, 2), patch-level + new_cursor_offset_next = new_cursor_offsets + + # important!!! 
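+            # Descriptive note: as in test_photograph_to_line.py, the concatenation below
+            # swaps the two components of the predicted offset, presumably to match the
+            # transposed (y, x) ordering used for the cursor position update.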
+ new_cursor_offset_next = np.concatenate([new_cursor_offset_next[:, 1:2], new_cursor_offset_next[:, 0:1]], + axis=-1) + + cursor_pos_large = cursor_pos * float(image_size) + stroke_position_next = cursor_pos_large[:, 0, :] + new_cursor_offset_next # (N, 2), large-level + + if model.hps.cursor_type == 'next': + cursor_pos_large = stroke_position_next # (N, 2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), + float(image_size - 1)) # (N, 2), large-level + cursor_pos_large = np.expand_dims(cursor_pos_large, axis=1) # (N, 1, 2) + cursor_pos = cursor_pos_large / float(image_size) + + if continuous_one_state_num >= round_stop_state_num or i == seq_len - 1: + round_length_real_list.append(i + 1) + break + + return params_list, state_raw_list, state_soft_list, curr_canvas, window_size_list, \ + round_cursor_list, round_length_real_list + + +def main_testing(test_image_base_dir, test_dataset, test_image_name, + sampling_base_dir, model_base_dir, model_name, + sampling_num, + min_dist_p, max_dist_p, + longer_infer_lens, round_stop_state_num, + draw_seq=False, draw_order=False, + state_dependent=True): + model_params_default = hparams.get_default_hparams_rough() + model_params = update_hyperparams(model_params_default, model_base_dir, model_name, infer_dataset=test_dataset) + + [test_set, eval_hps_model, sample_hps_model] = \ + load_dataset_testing(test_image_base_dir, test_dataset, test_image_name, model_params) + + test_image_raw_name = test_image_name[:test_image_name.find('.')] + model_dir = os.path.join(model_base_dir, model_name) + + reset_graph() + sampling_model = VirtualSketchingModel(sample_hps_model) + + # differentiable pasting graph + paste_v3_func = DiffPastingV3(sample_hps_model.raster_size) + + tfconfig = tf.ConfigProto() + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + # loads the weights from checkpoint into our model + snapshot_step = load_checkpoint(sess, model_dir, gen_model_pretrain=True) + print('snapshot_step', snapshot_step) + sampling_dir = os.path.join(sampling_base_dir, test_dataset + '__' + model_name) + os.makedirs(sampling_dir, exist_ok=True) + + for sampling_i in range(sampling_num): + input_photos, init_cursors, test_image_size = test_set.get_test_image() + # input_photos: (1, image_size, image_size, 3), [0-stroke, 1-BG] + # init_cursors: (N, 1, 2), in size [0.0, 1.0) + + print() + print(test_image_name, ', image_size:', test_image_size, ', sampling_i:', sampling_i) + print('Processing ...') + + if init_cursors.ndim == 3: + init_cursors = np.expand_dims(init_cursors, axis=0) + + input_photos = input_photos[0:1, :, :, :] + + ori_img = (input_photos.copy()[0] * 255.0).astype(np.uint8) + ori_img_png = Image.fromarray(ori_img, 'RGB') + ori_img_png.save(os.path.join(sampling_dir, test_image_raw_name + '_input.png'), 'PNG') + + # decoding for sampling + strokes_raw_out_list, states_raw_out_list, states_soft_out_list, pred_imgs_out, \ + window_size_out_list, round_new_cursors, round_new_lengths = sample( + sess, sampling_model, input_photos, init_cursors, test_image_size, + eval_hps_model.max_seq_len, longer_infer_lens, state_dependent, paste_v3_func, + round_stop_state_num, min_dist_p, max_dist_p) + # pred_imgs_out: (N, H, W), [0.0-BG, 1.0-stroke] + + print('## round_lengths:', len(round_new_lengths), ':', round_new_lengths) + + output_i = 0 + strokes_raw_out = np.stack(strokes_raw_out_list[output_i], 
axis=0) + states_raw_out = states_raw_out_list[output_i] + states_soft_out = states_soft_out_list[output_i] + window_size_out = window_size_out_list[output_i] + + multi_cursors = [init_cursors[0, output_i, 0]] + for c_i in range(len(round_new_cursors)): + best_cursor = round_new_cursors[c_i][output_i, 0] # (2) + multi_cursors.append(best_cursor) + assert len(multi_cursors) == len(round_new_lengths) + + print('strokes_raw_out', strokes_raw_out.shape) + + clean_states_soft_out = np.array(states_soft_out) # (N) + + flag_list = strokes_raw_out[:, 0].astype(np.int32) # (N) + drawing_len = len(flag_list) - np.sum(flag_list) + assert drawing_len >= 0 + + # print(' flag raw\t soft\t x1\t\t y1\t\t x2\t\t y2\t\t r2\t\t s2') + for i in range(strokes_raw_out.shape[0]): + flag, x1, y1, x2, y2, r2, s2 = strokes_raw_out[i] + win_size = window_size_out[i] + out_format = '#%d: %d | %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' + out_values = (i, flag, states_raw_out[i], clean_states_soft_out[i], x1, y1, x2, y2, r2, s2) + out_log = out_format % out_values + # print(out_log) + + print('Saving results ...') + save_seq_data(sampling_dir, test_image_raw_name + '_' + str(sampling_i), + strokes_raw_out, multi_cursors, + test_image_size, round_new_lengths, eval_hps_model.min_width) + + draw_strokes(strokes_raw_out, sampling_dir, test_image_raw_name + '_' + str(sampling_i) + '_pred.png', + ori_img, test_image_size, + multi_cursors, round_new_lengths, eval_hps_model.min_width, eval_hps_model.cursor_type, + sample_hps_model.raster_size, sample_hps_model.min_window_size, + sess, + pasting_func=paste_v3_func, + save_seq=draw_seq, draw_order=draw_order) + + +def main(model_name, test_image_name, sampling_num): + test_dataset = 'rough_sketches' + test_image_base_dir = 'sample_inputs' + + sampling_base_dir = 'outputs/sampling' + model_base_dir = 'outputs/snapshot' + + state_dependent = False + longer_infer_lens = [128 for _ in range(10)] + round_stop_state_num = 12 + min_dist_p = 0.3 + max_dist_p = 0.9 + + draw_seq = False + draw_color_order = True + + # set numpy output to something sensible + np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True) + + main_testing(test_image_base_dir, test_dataset, test_image_name, + sampling_base_dir, model_base_dir, model_name, sampling_num, + min_dist_p=min_dist_p, max_dist_p=max_dist_p, + draw_seq=draw_seq, draw_order=draw_color_order, + state_dependent=state_dependent, longer_infer_lens=longer_infer_lens, + round_stop_state_num=round_stop_state_num) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--input', '-i', type=str, default='', help="The test image name.") + parser.add_argument('--model', '-m', type=str, default='pretrain_rough_sketches', help="The trained model.") + parser.add_argument('--sample', '-s', type=int, default=1, help="The number of outputs.") + args = parser.parse_args() + + assert args.input != '' + assert args.sample > 0 + + main(args.model, args.input, args.sample) diff --git a/robot_painting/qmupd_vs/test_seq_style3.py b/robot_painting/qmupd_vs/test_seq_style3.py new file mode 100644 index 0000000000000000000000000000000000000000..fe6e7c302a88c56498b43f41b67d02ad6f751830 --- /dev/null +++ b/robot_painting/qmupd_vs/test_seq_style3.py @@ -0,0 +1,378 @@ +import os, glob + +#================== settings ================== +exp = 'QMUPD_model';epoch='200' +dataroot = 'examples/test' +gpu_id = '-1' + +netga = 'resnet_style2_9blocks' +model0_res = 0 +model1_res = 0 +imgsize = 512 +extraflag = ' --netga 
%s --model0_res %d --model1_res %d' % (netga, model0_res, model1_res) + +import numpy as np +import os +import tensorflow as tf +from six.moves import range +from PIL import Image +import argparse + +import hyper_parameters as hparams +from model_common_test import DiffPastingV3, VirtualSketchingModel +from utils import reset_graph, load_checkpoint, update_hyperparams, draw, \ + save_seq_data, image_pasting_v3_testing, draw_strokes +from dataset_utils import load_dataset_testing + +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + + +def move_cursor_to_undrawn(current_pos_list, input_image_, patch_size, + move_min_dist, move_max_dist, trial_times=20): + """ + :param current_pos_list: (select_times, 1, 2), [0.0, 1.0) + :param input_image_: (1, image_size, image_size, 3), [0-stroke, 1-BG] + :return: new_cursor_pos: (select_times, 1, 2), [0.0, 1.0) + """ + + def crop_patch(image, center, image_size, crop_size): + x0 = center[0] - crop_size // 2 + x1 = x0 + crop_size + y0 = center[1] - crop_size // 2 + y1 = y0 + crop_size + x0 = max(0, min(x0, image_size)) + y0 = max(0, min(y0, image_size)) + x1 = max(0, min(x1, image_size)) + y1 = max(0, min(y1, image_size)) + patch = image[y0:y1, x0:x1] + return patch + + def isvalid_cursor(input_img, cursor, raster_size, image_size): + # input_img: (image_size, image_size, 3), [0.0-BG, 1.0-stroke] + cursor_large = cursor * float(image_size) + cursor_large = np.round(cursor_large).astype(np.int32) + input_crop_patch = crop_patch(input_img, cursor_large, image_size, raster_size) + if np.sum(input_crop_patch) > 0.0: + return True + else: + return False + + def randomly_move_cursor(cursor_position, img_size, min_dist_p, max_dist_p): + # cursor_position: (2), [0.0, 1.0) + cursor_pos_large = cursor_position * img_size + min_dist = int(min_dist_p / 2.0 * img_size) + max_dist = int(max_dist_p / 2.0 * img_size) + rand_cursor_offset = np.random.randint(min_dist, max_dist, size=cursor_pos_large.shape) + rand_cursor_offset_sign = np.random.randint(0, 1 + 1, size=cursor_pos_large.shape) + rand_cursor_offset_sign[rand_cursor_offset_sign == 0] = -1 + rand_cursor_offset = rand_cursor_offset * rand_cursor_offset_sign + + new_cursor_pos_large = cursor_pos_large + rand_cursor_offset + new_cursor_pos_large = np.minimum(np.maximum(new_cursor_pos_large, 0), img_size - 1) # (2), large-level + new_cursor_pos = new_cursor_pos_large.astype(np.float32) / float(img_size) + return new_cursor_pos + + input_image = 1.0 - input_image_[0] # (image_size, image_size, 3), [0-BG, 1-stroke] + img_size = input_image.shape[0] + + new_cursor_pos = [] + for cursor_i in range(current_pos_list.shape[0]): + curr_cursor = current_pos_list[cursor_i][0] + + for trial_i in range(trial_times): + new_cursor = randomly_move_cursor(curr_cursor, img_size, move_min_dist, move_max_dist) # (2), [0.0, 1.0) + + if isvalid_cursor(input_image, new_cursor, patch_size, img_size) or trial_i == trial_times - 1: + new_cursor_pos.append(new_cursor) + break + + assert len(new_cursor_pos) == current_pos_list.shape[0] + new_cursor_pos = np.expand_dims(np.stack(new_cursor_pos, axis=0), axis=1) # (select_times, 1, 2), [0.0, 1.0) + return new_cursor_pos + + +def sample(sess, model, input_photos, init_cursor, image_size, init_len, seq_lens, + state_dependent, pasting_func, round_stop_state_num, + min_dist_p, max_dist_p): + """Samples a sequence from a pre-trained model.""" + select_times = 1 + curr_canvas = np.zeros(dtype=np.float32, + shape=(select_times, image_size, image_size)) # [0.0-BG, 1.0-stroke] + + initial_state = 
sess.run(model.initial_state) + + params_list = [[] for _ in range(select_times)] + state_raw_list = [[] for _ in range(select_times)] + state_soft_list = [[] for _ in range(select_times)] + window_size_list = [[] for _ in range(select_times)] + + round_cursor_list = [] + round_length_real_list = [] + + input_photos_tiles = np.tile(input_photos, (select_times, 1, 1, 1)) + + for cursor_i, seq_len in enumerate(seq_lens): + if cursor_i == 0: + cursor_pos = np.squeeze(init_cursor, axis=0) # (select_times, 1, 2) + else: + cursor_pos = move_cursor_to_undrawn(cursor_pos, input_photos, model.hps.raster_size, + min_dist_p, max_dist_p) # (select_times, 1, 2) + round_cursor_list.append(cursor_pos) + + prev_state = initial_state + prev_width = np.stack([model.hps.min_width for _ in range(select_times)], axis=0) + prev_scaling = np.ones((select_times), dtype=np.float32) # (N) + prev_window_size = np.ones((select_times), dtype=np.float32) * model.hps.raster_size # (N) + + continuous_one_state_num = 0 + + for i in range(seq_len): + if not state_dependent and i % init_len == 0: + prev_state = initial_state + + curr_window_size = prev_scaling * prev_window_size # (N) + curr_window_size = np.maximum(curr_window_size, model.hps.min_window_size) + curr_window_size = np.minimum(curr_window_size, image_size) + + feed = { + model.initial_state: prev_state, + model.input_photo: input_photos_tiles, + model.curr_canvas_hard: curr_canvas.copy(), + model.cursor_position: cursor_pos, + model.image_size: image_size, + model.init_width: prev_width, + model.init_scaling: prev_scaling, + model.init_window_size: prev_window_size, + } + + o_other_params_list, o_pen_list, o_pred_params_list, next_state_list = \ + sess.run([model.other_params, model.pen_ras, model.pred_params, model.final_state], feed_dict=feed) + # o_other_params: (N, 6), o_pen: (N, 2), pred_params: (N, 1, 7), next_state: (N, 1024) + # o_other_params: [tanh*2, sigmoid*2, tanh*2, sigmoid*2] + + idx_eos_list = np.argmax(o_pen_list, axis=1) # (N) + + output_i = 0 + idx_eos = idx_eos_list[output_i] + + eos = [0, 0] + eos[idx_eos] = 1 + + other_params = o_other_params_list[output_i].tolist() # (6) + params_list[output_i].append([eos[1]] + other_params) + state_raw_list[output_i].append(o_pen_list[output_i][1]) + state_soft_list[output_i].append(o_pred_params_list[output_i, 0, 0]) + window_size_list[output_i].append(curr_window_size[output_i]) + + # draw the stroke and add to the canvas + x1y1, x2y2, width2 = o_other_params_list[output_i, 0:2], o_other_params_list[output_i, 2:4], \ + o_other_params_list[output_i, 4] + x0y0 = np.zeros_like(x2y2) # (2), [-1.0, 1.0] + x0y0 = np.divide(np.add(x0y0, 1.0), 2.0) # (2), [0.0, 1.0] + x2y2 = np.divide(np.add(x2y2, 1.0), 2.0) # (2), [0.0, 1.0] + widths = np.stack([prev_width[output_i], width2], axis=0) # (2) + o_other_params_proc = np.concatenate([x0y0, x1y1, x2y2, widths], axis=-1).tolist() # (8) + + if idx_eos == 0: + f = o_other_params_proc + [1.0, 1.0] + pred_stroke_img = draw(f) # (raster_size, raster_size), [0.0-stroke, 1.0-BG] + pred_stroke_img_large = image_pasting_v3_testing(1.0 - pred_stroke_img, + cursor_pos[output_i, 0], + image_size, + curr_window_size[output_i], + pasting_func, sess) # [0.0-BG, 1.0-stroke] + curr_canvas[output_i] += pred_stroke_img_large # [0.0-BG, 1.0-stroke] + + continuous_one_state_num = 0 + else: + continuous_one_state_num += 1 + + curr_canvas = np.clip(curr_canvas, 0.0, 1.0) + + next_width = o_other_params_list[:, 4] # (N) + next_scaling = o_other_params_list[:, 5] + next_window_size = 
next_scaling * curr_window_size # (N) + next_window_size = np.maximum(next_window_size, model.hps.min_window_size) + next_window_size = np.minimum(next_window_size, image_size) + + prev_state = next_state_list + prev_width = next_width * curr_window_size / next_window_size # (N,) + prev_scaling = next_scaling # (N) + prev_window_size = curr_window_size + + # update cursor_pos based on hps.cursor_type + new_cursor_offsets = o_other_params_list[:, 2:4] * ( + np.expand_dims(curr_window_size, axis=-1) / 2.0) # (N, 2), patch-level + new_cursor_offset_next = new_cursor_offsets + + # important!!! + new_cursor_offset_next = np.concatenate([new_cursor_offset_next[:, 1:2], new_cursor_offset_next[:, 0:1]], + axis=-1) + + cursor_pos_large = cursor_pos * float(image_size) + stroke_position_next = cursor_pos_large[:, 0, :] + new_cursor_offset_next # (N, 2), large-level + + if model.hps.cursor_type == 'next': + cursor_pos_large = stroke_position_next # (N, 2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), + float(image_size - 1)) # (N, 2), large-level + cursor_pos_large = np.expand_dims(cursor_pos_large, axis=1) # (N, 1, 2) + cursor_pos = cursor_pos_large / float(image_size) + + if continuous_one_state_num >= round_stop_state_num or i == seq_len - 1: + round_length_real_list.append(i + 1) + break + + return params_list, state_raw_list, state_soft_list, curr_canvas, window_size_list, \ + round_cursor_list, round_length_real_list + + +def main_testing(test_image_base_dir, test_dataset, test_image_name, + sampling_base_dir, model_base_dir, model_name, + sampling_num, + min_dist_p, max_dist_p, + longer_infer_lens, round_stop_state_num, + draw_seq=False, draw_order=False, + state_dependent=True): + model_params_default = hparams.get_default_hparams_rough() + model_params = update_hyperparams(model_params_default, model_base_dir, model_name, infer_dataset=test_dataset) + + [test_set, eval_hps_model, sample_hps_model] = \ + load_dataset_testing(test_image_base_dir, test_dataset, test_image_name, model_params) + + test_image_raw_name = test_image_name[:test_image_name.find('.')] + model_dir = os.path.join(model_base_dir, model_name) + + reset_graph() + sampling_model = VirtualSketchingModel(sample_hps_model) + + # differentiable pasting graph + paste_v3_func = DiffPastingV3(sample_hps_model.raster_size) + + tfconfig = tf.ConfigProto() + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + # loads the weights from checkpoint into our model + snapshot_step = load_checkpoint(sess, model_dir, gen_model_pretrain=True) + print('snapshot_step', snapshot_step) + sampling_dir = os.path.join(sampling_base_dir, test_dataset + '__' + model_name) + os.makedirs(sampling_dir, exist_ok=True) + + for sampling_i in range(sampling_num): + input_photos, init_cursors, test_image_size = test_set.get_test_image() + # input_photos: (1, image_size, image_size, 3), [0-stroke, 1-BG] + # init_cursors: (N, 1, 2), in size [0.0, 1.0) + + print() + print(test_image_name, ', image_size:', test_image_size, ', sampling_i:', sampling_i) + print('Processing ...') + + if init_cursors.ndim == 3: + init_cursors = np.expand_dims(init_cursors, axis=0) + + input_photos = input_photos[0:1, :, :, :] + + ori_img = (input_photos.copy()[0] * 255.0).astype(np.uint8) + ori_img_png = Image.fromarray(ori_img, 'RGB') + ori_img_png.save(os.path.join(sampling_dir, test_image_raw_name + 
'_input.png'), 'PNG') + + # decoding for sampling + strokes_raw_out_list, states_raw_out_list, states_soft_out_list, pred_imgs_out, \ + window_size_out_list, round_new_cursors, round_new_lengths = sample( + sess, sampling_model, input_photos, init_cursors, test_image_size, + eval_hps_model.max_seq_len, longer_infer_lens, state_dependent, paste_v3_func, + round_stop_state_num, min_dist_p, max_dist_p) + # pred_imgs_out: (N, H, W), [0.0-BG, 1.0-stroke] + + print('## round_lengths:', len(round_new_lengths), ':', round_new_lengths) + + output_i = 0 + strokes_raw_out = np.stack(strokes_raw_out_list[output_i], axis=0) + states_raw_out = states_raw_out_list[output_i] + states_soft_out = states_soft_out_list[output_i] + window_size_out = window_size_out_list[output_i] + + multi_cursors = [init_cursors[0, output_i, 0]] + for c_i in range(len(round_new_cursors)): + best_cursor = round_new_cursors[c_i][output_i, 0] # (2) + multi_cursors.append(best_cursor) + assert len(multi_cursors) == len(round_new_lengths) + + print('strokes_raw_out', strokes_raw_out.shape) + + clean_states_soft_out = np.array(states_soft_out) # (N) + + flag_list = strokes_raw_out[:, 0].astype(np.int32) # (N) + drawing_len = len(flag_list) - np.sum(flag_list) + assert drawing_len >= 0 + + # print(' flag raw\t soft\t x1\t\t y1\t\t x2\t\t y2\t\t r2\t\t s2') + for i in range(strokes_raw_out.shape[0]): + flag, x1, y1, x2, y2, r2, s2 = strokes_raw_out[i] + win_size = window_size_out[i] + out_format = '#%d: %d | %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' + out_values = (i, flag, states_raw_out[i], clean_states_soft_out[i], x1, y1, x2, y2, r2, s2) + out_log = out_format % out_values + # print(out_log) + + print('Saving results ...') + save_seq_data(sampling_dir, test_image_raw_name + '_' + str(sampling_i), + strokes_raw_out, multi_cursors, + test_image_size, round_new_lengths, eval_hps_model.min_width) + + draw_strokes(strokes_raw_out, sampling_dir, test_image_raw_name + '_' + str(sampling_i) + '_pred.png', + ori_img, test_image_size, + multi_cursors, round_new_lengths, eval_hps_model.min_width, eval_hps_model.cursor_type, + sample_hps_model.raster_size, sample_hps_model.min_window_size, + sess, + pasting_func=paste_v3_func, + save_seq=draw_seq, draw_order=draw_order) + + +def main(model_name, test_image_name, sampling_num): + test_dataset = 'rough_sketches' + # test_image_base_dir = 'sample_inputs' + test_image_base_dir = 'results/QMUPD_model/test_200/imagesstyle0-0-1' + + sampling_base_dir = 'outputs/sampling' + model_base_dir = 'outputs/snapshot' + + state_dependent = False + longer_infer_lens = [128 for _ in range(10)] + round_stop_state_num = 12 + min_dist_p = 0.3 + max_dist_p = 0.9 + + draw_seq = False + draw_color_order = True + + # set numpy output to something sensible + np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True) + + main_testing(test_image_base_dir, test_dataset, test_image_name, + sampling_base_dir, model_base_dir, model_name, sampling_num, + min_dist_p=min_dist_p, max_dist_p=max_dist_p, + draw_seq=draw_seq, draw_order=draw_color_order, + state_dependent=state_dependent, longer_infer_lens=longer_infer_lens, + round_stop_state_num=round_stop_state_num) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--input', '-i', type=str, default='', help="The test image name.") + parser.add_argument('--model', '-m', type=str, default='pretrain_rough_sketches', help="The trained model.") + parser.add_argument('--sample', '-s', type=int, default=1, help="The 
number of outputs.") + args = parser.parse_args() + + assert args.input != '' + assert args.sample > 0 + + #==================== command ================== + + print(args.input, args.model, args.sample) + main(args.model, args.input, args.sample) diff --git a/robot_painting/qmupd_vs/test_vectorization.py b/robot_painting/qmupd_vs/test_vectorization.py new file mode 100644 index 0000000000000000000000000000000000000000..31260f57057a39f331b71ec6ce6435822acbfdf1 --- /dev/null +++ b/robot_painting/qmupd_vs/test_vectorization.py @@ -0,0 +1,463 @@ +import numpy as np +import random +import os +import tensorflow as tf +from six.moves import range +from PIL import Image +import time +import argparse + +import hyper_parameters as hparams +from model_common_test import DiffPastingV3, VirtualSketchingModel +from utils import reset_graph, load_checkpoint, update_hyperparams, draw, \ + save_seq_data, image_pasting_v3_testing, draw_strokes +from dataset_utils import load_dataset_testing + +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + + +def move_cursor_to_undrawn(current_canvas_list, input_image_, last_min_acc_list, grid_patch_size=128, + stroke_acc_threshold=0.95, stroke_num_threshold=5, continuous_min_acc_threshold=2): + """ + :param current_canvas_list: (select_times, image_size, image_size), [0.0-BG, 1.0-stroke] + :param input_image_: (1, image_size, image_size), [0-stroke, 1-BG] + :return: new_cursor_pos: (select_times, 1, 2), [0.0, 1.0) + """ + def split_images(in_img, image_size, grid_size): + if image_size % grid_size == 0: + paddings_ = 0 + else: + paddings_ = grid_size - image_size % grid_size + paddings = [[0, paddings_], + [0, paddings_]] + image_pad = np.pad(in_img, paddings, mode='constant', constant_values=0.0) # (H_p, W_p), [0.0-BG, 1.0-stroke] + + assert image_pad.shape[0] % grid_size == 0 + + split_num = image_pad.shape[0] // grid_size + + images_h = np.hsplit(image_pad, split_num) + image_patches = [] + for image_h in images_h: + images_v = np.vsplit(image_h, split_num) + image_patches += images_v + image_patches = np.array(image_patches, dtype=np.float32) + return image_patches, split_num + + def line_drawing_rounding(line_drawing): + line_drawing_r = np.copy(line_drawing) # [0.0-BG, 1.0-stroke] + line_drawing_r[line_drawing_r != 0.0] = 1.0 + return line_drawing_r + + def cal_undrawn_pixels(in_canvas, in_sketch): + in_canvas_round = line_drawing_rounding(in_canvas).astype(np.int32) # (N, H, W), [0.0-BG, 1.0-stroke] + in_sketch_round = line_drawing_rounding(in_sketch).astype(np.int32) + + intersection = np.bitwise_and(in_canvas_round, in_sketch_round) + + intersection_sum = np.sum(intersection, axis=(1, 2)) + gt_sum = np.sum(in_sketch_round, axis=(1, 2)) # (N) + + undrawn_num = gt_sum - intersection_sum + return undrawn_num + + def cal_stroke_acc(in_canvas, in_sketch): + in_canvas_round = line_drawing_rounding(in_canvas).astype(np.int32) # (N, H, W), [0.0-BG, 1.0-stroke] + in_sketch_round = line_drawing_rounding(in_sketch).astype(np.int32) + + intersection = np.bitwise_and(in_canvas_round, in_sketch_round) + + intersection_sum = np.sum(intersection, axis=(1, 2)).astype(np.float32) + gt_sum = np.sum(in_sketch_round, axis=(1, 2)).astype(np.float32) # (N) + undrawn_num = gt_sum - intersection_sum # (N) + + stroke_acc = intersection_sum / gt_sum # (N) + stroke_acc[gt_sum == 0.0] = 1.0 + stroke_acc[undrawn_num <= stroke_num_threshold] = 1.0 + return stroke_acc + + def get_cursor(patch_idx, img_size, grid_size, split_num): + y_pos = patch_idx % split_num + x_pos = patch_idx // split_num + + 
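+        # sample the cursor uniformly within the central half of the selected
+        # grid cell, keeping the new attention window away from the cell borders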
y_top = y_pos * grid_size + grid_size // 4 + y_bottom = y_top + grid_size // 2 + x_left = x_pos * grid_size + grid_size // 4 + x_right = x_left + grid_size // 2 + + cursor_y = random.randint(y_top, y_bottom) + cursor_x = random.randint(x_left, x_right) + + cursor_y = max(0, min(cursor_y, img_size - 1)) + cursor_x = max(0, min(cursor_x, img_size - 1)) # (2), in large size + center = np.array([cursor_x, cursor_y], dtype=np.float32) + + return center / float(img_size) # (2), in size [0.0, 1.0) + + input_image = 1.0 - input_image_[0] # (image_size, image_size), [0-BG, 1-stroke] + img_size = input_image.shape[0] + + input_image_patches, split_number = split_images(input_image, img_size, grid_patch_size) # (N, grid_size, grid_size) + + new_cursor_pos = [] + last_min_acc_list_new = [item for item in last_min_acc_list] + for canvas_i in range(current_canvas_list.shape[0]): + curr_canvas = current_canvas_list[canvas_i] # (image_size, image_size), [0.0-BG, 1.0-stroke] + + curr_canvas_patches, _ = split_images(curr_canvas, img_size, grid_patch_size) # (N, grid_size, grid_size) + + # 1. detect ending flag by stroke accuracy + stroke_accuracy = cal_stroke_acc(curr_canvas_patches, input_image_patches) + min_acc_idx = np.argmin(stroke_accuracy) + min_acc= stroke_accuracy[min_acc_idx] + # print('min_acc_idx', min_acc_idx, ' | ', 'min_acc', min_acc) + + if min_acc >= stroke_acc_threshold: # end of drawing + return None, None + + # 2. detect undrawn pixels + undrawn_pixel_num = cal_undrawn_pixels(curr_canvas_patches, input_image_patches) + # undrawn_pixel_num_dis = np.reshape(undrawn_pixel_num, (split_number, split_number)).T + # print('undrawn_pixel_num_dis') + # print(undrawn_pixel_num_dis) + + max_undrawn_idx = np.argmax(undrawn_pixel_num) + # max_undrawn = undrawn_pixel_num[max_undrawn_idx] + # print('max_undrawn_idx', max_undrawn_idx, ' | ', 'max_undrawn', max_undrawn) + + # 3. select a random position + last_min_acc_idx, last_min_acc_times = last_min_acc_list[canvas_i] + if last_min_acc_times >= continuous_min_acc_threshold and last_min_acc_idx == min_acc_idx: + selected_patch_idx = last_min_acc_idx + new_min_acc_times = 1 + else: + selected_patch_idx = max_undrawn_idx + + if min_acc_idx == last_min_acc_idx: + new_min_acc_times = last_min_acc_times + 1 + else: + new_min_acc_times = 1 + + new_min_acc_idx = min_acc_idx + last_min_acc_list_new[canvas_i] = (new_min_acc_idx, new_min_acc_times) + # print('selected_patch_idx', selected_patch_idx) + + # 4. 
get cursor according to the selected_patch_idx + rand_cursor = get_cursor(selected_patch_idx, img_size, grid_patch_size, split_number) # (2), in size [0.0, 1.0) + new_cursor_pos.append(rand_cursor) + + assert len(new_cursor_pos) == current_canvas_list.shape[0] + new_cursor_pos = np.expand_dims(np.stack(new_cursor_pos, axis=0), axis=1) # (select_times, 1, 2), [0.0, 1.0) + return new_cursor_pos, last_min_acc_list_new + + +def sample(sess, model, input_photos, init_cursor, image_size, init_len, seq_lens, state_dependent, + pasting_func, round_stop_state_num, stroke_acc_threshold): + """Samples a sequence from a pre-trained model.""" + select_times = 1 + curr_canvas = np.zeros(dtype=np.float32, + shape=(select_times, image_size, image_size)) # [0.0-BG, 1.0-stroke] + + initial_state = sess.run(model.initial_state) + prev_width = np.stack([model.hps.min_width for _ in range(select_times)], axis=0) + + params_list = [[] for _ in range(select_times)] + state_raw_list = [[] for _ in range(select_times)] + state_soft_list = [[] for _ in range(select_times)] + window_size_list = [[] for _ in range(select_times)] + + last_min_stroke_acc_list = [(-1, 0) for _ in range(select_times)] + + round_cursor_list = [] + round_length_real_list = [] + + input_photos_tiles = np.tile(input_photos, (select_times, 1, 1)) + + for cursor_i, seq_len in enumerate(seq_lens): + # print('\n') + # print('@@ Round', cursor_i + 1) + if cursor_i == 0: + cursor_pos = np.squeeze(init_cursor, axis=0) # (select_times, 1, 2) + else: + cursor_pos, last_min_stroke_acc_list_updated = \ + move_cursor_to_undrawn(curr_canvas, input_photos, last_min_stroke_acc_list, + grid_patch_size=model.hps.raster_size, + stroke_acc_threshold=stroke_acc_threshold) # (select_times, 1, 2) + if cursor_pos is not None: + round_cursor_list.append(cursor_pos) + last_min_stroke_acc_list = last_min_stroke_acc_list_updated + else: + break + + prev_state = initial_state + if not model.hps.init_cursor_on_undrawn_pixel: + prev_width = np.stack([model.hps.min_width for _ in range(select_times)], axis=0) + prev_scaling = np.ones((select_times), dtype=np.float32) # (N) + prev_window_size = np.ones((select_times), dtype=np.float32) * model.hps.raster_size # (N) + + continuous_one_state_num = 0 + + for i in range(seq_len): + if not state_dependent and i % init_len == 0: + prev_state = initial_state + + curr_window_size = prev_scaling * prev_window_size # (N) + curr_window_size = np.maximum(curr_window_size, model.hps.min_window_size) + curr_window_size = np.minimum(curr_window_size, image_size) + + feed = { + model.initial_state: prev_state, + model.input_photo: np.expand_dims(input_photos_tiles, axis=-1), + model.curr_canvas_hard: curr_canvas.copy(), + model.cursor_position: cursor_pos, + model.image_size: image_size, + model.init_width: prev_width, + model.init_scaling: prev_scaling, + model.init_window_size: prev_window_size, + } + + o_other_params_list, o_pen_list, o_pred_params_list, next_state_list = \ + sess.run([model.other_params, model.pen_ras, model.pred_params, model.final_state], feed_dict=feed) + # o_other_params: (N, 6), o_pen: (N, 2), pred_params: (N, 1, 7), next_state: (N, 1024) + # o_other_params: [tanh*2, sigmoid*2, tanh*2, sigmoid*2] + + idx_eos_list = np.argmax(o_pen_list, axis=1) # (N) + + output_i = 0 + idx_eos = idx_eos_list[output_i] + + eos = [0, 0] + eos[idx_eos] = 1 + + other_params = o_other_params_list[output_i].tolist() # (6) + params_list[output_i].append([eos[1]] + other_params) + state_raw_list[output_i].append(o_pen_list[output_i][1]) 
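+            # pred_params[:, 0, 0] holds the soft (continuous) pen-state estimate;
+            # it is kept alongside the hard argmax over pen_ras for the log below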
+ state_soft_list[output_i].append(o_pred_params_list[output_i, 0, 0]) + window_size_list[output_i].append(curr_window_size[output_i]) + + # draw the stroke and add to the canvas + x1y1, x2y2, width2 = o_other_params_list[output_i, 0:2], o_other_params_list[output_i, 2:4], \ + o_other_params_list[output_i, 4] + x0y0 = np.zeros_like(x2y2) # (2), [-1.0, 1.0] + x0y0 = np.divide(np.add(x0y0, 1.0), 2.0) # (2), [0.0, 1.0] + x2y2 = np.divide(np.add(x2y2, 1.0), 2.0) # (2), [0.0, 1.0] + widths = np.stack([prev_width[output_i], width2], axis=0) # (2) + o_other_params_proc = np.concatenate([x0y0, x1y1, x2y2, widths], axis=-1).tolist() # (8) + + if idx_eos == 0: + f = o_other_params_proc + [1.0, 1.0] + pred_stroke_img = draw(f) # (raster_size, raster_size), [0.0-stroke, 1.0-BG] + pred_stroke_img_large = image_pasting_v3_testing(1.0 - pred_stroke_img, cursor_pos[output_i, 0], + image_size, + curr_window_size[output_i], + pasting_func, sess) # [0.0-BG, 1.0-stroke] + curr_canvas[output_i] += pred_stroke_img_large # [0.0-BG, 1.0-stroke] + + continuous_one_state_num = 0 + else: + continuous_one_state_num += 1 + + curr_canvas = np.clip(curr_canvas, 0.0, 1.0) + + next_width = o_other_params_list[:, 4] # (N) + next_scaling = o_other_params_list[:, 5] + next_window_size = next_scaling * curr_window_size # (N) + next_window_size = np.maximum(next_window_size, model.hps.min_window_size) + next_window_size = np.minimum(next_window_size, image_size) + + prev_state = next_state_list + prev_width = next_width * curr_window_size / next_window_size # (N,) + prev_scaling = next_scaling # (N) + prev_window_size = curr_window_size + + # update cursor_pos based on hps.cursor_type + new_cursor_offsets = o_other_params_list[:, 2:4] * (np.expand_dims(curr_window_size, axis=-1) / 2.0) # (N, 2), patch-level + new_cursor_offset_next = new_cursor_offsets + + # important!!! 
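+            # swap the two components of the offset: the predicted stroke offsets
+            # and the stored cursor position use opposite x/y orderings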
+ new_cursor_offset_next = np.concatenate([new_cursor_offset_next[:, 1:2], new_cursor_offset_next[:, 0:1]], axis=-1) + + cursor_pos_large = cursor_pos * float(image_size) + stroke_position_next = cursor_pos_large[:, 0, :] + new_cursor_offset_next # (N, 2), large-level + + if model.hps.cursor_type == 'next': + cursor_pos_large = stroke_position_next # (N, 2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), float(image_size - 1)) # (N, 2), large-level + cursor_pos_large = np.expand_dims(cursor_pos_large, axis=1) # (N, 1, 2) + cursor_pos = cursor_pos_large / float(image_size) + + if continuous_one_state_num >= round_stop_state_num or i == seq_len - 1: + round_length_real_list.append(i + 1) + break + + return params_list, state_raw_list, state_soft_list, curr_canvas, window_size_list, \ + round_cursor_list, round_length_real_list + + +def main_testing(test_image_base_dir, test_dataset, test_image_name, + sampling_base_dir, model_base_dir, model_name, + sampling_num, + longer_infer_lens, + round_stop_state_num, stroke_acc_threshold, + draw_seq=False, draw_order=False, + state_dependent=True): + model_params_default = hparams.get_default_hparams_clean() + model_params = update_hyperparams(model_params_default, model_base_dir, model_name, infer_dataset=test_dataset) + + [test_set, eval_hps_model, sample_hps_model] \ + = load_dataset_testing(test_image_base_dir, test_dataset, test_image_name, model_params) + + test_image_raw_name = test_image_name[:test_image_name.find('.')] + model_dir = os.path.join(model_base_dir, model_name) + + reset_graph() + sampling_model = VirtualSketchingModel(sample_hps_model) + + # differentiable pasting graph + paste_v3_func = DiffPastingV3(sample_hps_model.raster_size) + + tfconfig = tf.ConfigProto() + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + # loads the weights from checkpoint into our model + snapshot_step = load_checkpoint(sess, model_dir, gen_model_pretrain=True) + print('snapshot_step', snapshot_step) + sampling_dir = os.path.join(sampling_base_dir, test_dataset + '__' + model_name) + os.makedirs(sampling_dir, exist_ok=True) + + stroke_number_list = [] + compute_time_list = [] + + for sampling_i in range(sampling_num): + start_time_point = time.time() + input_photos, init_cursors, test_image_size = test_set.get_test_image() + # input_photos: (1, image_size, image_size), [0-stroke, 1-BG] + # init_cursors: (1, 1, 2), in size [0.0, 1.0) + + print() + print(test_image_name, ', image_size:', test_image_size, ', sampling_i:', sampling_i) + print('Processing ...') + + if init_cursors.ndim == 3: + init_cursors = np.expand_dims(init_cursors, axis=0) + + input_photos = input_photos[0:1, :, :] + ori_img = (input_photos.copy()[0] * 255.0).astype(np.uint8) + ori_img = np.stack([ori_img for _ in range(3)], axis=2) + ori_img_png = Image.fromarray(ori_img, 'RGB') + ori_img_png.save(os.path.join(sampling_dir, test_image_raw_name + '_input.png'), 'PNG') + + data_loading_time_point = time.time() + + # decoding for sampling + strokes_raw_out_list, states_raw_out_list, states_soft_out_list, pred_imgs_out, \ + window_size_out_list, round_new_cursors, round_new_lengths = sample( + sess, sampling_model, input_photos, init_cursors, test_image_size, + eval_hps_model.max_seq_len, longer_infer_lens, state_dependent, + paste_v3_func, round_stop_state_num, stroke_acc_threshold) + # pred_imgs_out: [0.0-BG, 
1.0-stroke] + + print('## round_lengths:', len(round_new_lengths), ':', round_new_lengths) + + sampling_time_point = time.time() + + data_loading_time = data_loading_time_point - start_time_point + sampling_time_total = sampling_time_point - start_time_point + sampling_time_wo_data_loading = sampling_time_point - data_loading_time_point + compute_time_list.append(sampling_time_total) + # print(' >>> data_loading_time', data_loading_time) + print(' >>> sampling_time_total', sampling_time_total) + # print(' >>> sampling_time_wo_data_loading', sampling_time_wo_data_loading) + + best_result_idx = 0 + strokes_raw_out = np.stack(strokes_raw_out_list[best_result_idx], axis=0) + states_raw_out = states_raw_out_list[best_result_idx] + states_soft_out = states_soft_out_list[best_result_idx] + window_size_out = window_size_out_list[best_result_idx] + + multi_cursors = [init_cursors[0, best_result_idx, 0]] + for c_i in range(len(round_new_cursors)): + best_cursor = round_new_cursors[c_i][best_result_idx, 0] # (2) + multi_cursors.append(best_cursor) + assert len(multi_cursors) == len(round_new_lengths) + + print('strokes_raw_out', strokes_raw_out.shape) + stroke_number_list.append(strokes_raw_out.shape[0]) + + clean_states_soft_out = np.array(states_soft_out) # (N) + + flag_list = strokes_raw_out[:, 0].astype(np.int32) # (N) + drawing_len = len(flag_list) - np.sum(flag_list) + assert drawing_len >= 0 + + # print(' flag raw\t soft\t x1\t\t y1\t\t x2\t\t y2\t\t r2\t\t s2') + for i in range(strokes_raw_out.shape[0]): + flag, x1, y1, x2, y2, r2, s2 = strokes_raw_out[i] + win_size = window_size_out[i] + out_format = '#%d: %d | %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' + out_values = (i, flag, states_raw_out[i], clean_states_soft_out[i], x1, y1, x2, y2, r2, s2) + out_log = out_format % out_values + # print(out_log) + + print('Saving results ...') + save_seq_data(sampling_dir, test_image_raw_name + '_' + str(sampling_i), + strokes_raw_out, multi_cursors, + test_image_size, round_new_lengths, eval_hps_model.min_width) + + draw_strokes(strokes_raw_out, sampling_dir, test_image_raw_name + '_' + str(sampling_i) + '_pred.png', + ori_img, test_image_size, + multi_cursors, round_new_lengths, eval_hps_model.min_width, eval_hps_model.cursor_type, + sample_hps_model.raster_size, sample_hps_model.min_window_size, + sess, + pasting_func=paste_v3_func, + save_seq=draw_seq, draw_order=draw_order) + + average_stroke_number = np.mean(stroke_number_list) + average_compute_time = np.mean(compute_time_list) + print() + print('@@@ Total summary:') + print(' >>> average_stroke_number', average_stroke_number) + print(' >>> average_compute_time', average_compute_time) + + +def main(model_name, test_image_name, sampling_num): + test_dataset = 'clean_line_drawings' + test_image_base_dir = 'sample_inputs' + + sampling_base_dir = 'outputs/sampling' + model_base_dir = 'outputs/snapshot' + + state_dependent = False + longer_infer_lens = [500 for _ in range(10)] + round_stop_state_num = 12 + stroke_acc_threshold = 0.95 + + draw_seq = False + draw_color_order = True + + # set numpy output to something sensible + np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True) + + main_testing(test_image_base_dir, test_dataset, test_image_name, + sampling_base_dir, model_base_dir, model_name, sampling_num, + draw_seq=draw_seq, draw_order=draw_color_order, + state_dependent=state_dependent, longer_infer_lens=longer_infer_lens, + round_stop_state_num=round_stop_state_num, stroke_acc_threshold=stroke_acc_threshold) + + +if 
__name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--input', '-i', type=str, default='', help="The test image name.")
+    parser.add_argument('--model', '-m', type=str, default='pretrain_clean_line_drawings', help="The trained model.")
+    parser.add_argument('--sample', '-s', type=int, default=1, help="The number of outputs.")
+    args = parser.parse_args()
+
+    assert args.input != ''
+    assert args.sample > 0
+
+    main(args.model, args.input, args.sample)
diff --git a/robot_painting/qmupd_vs/tools/dynamic_draw_show.py b/robot_painting/qmupd_vs/tools/dynamic_draw_show.py
new file mode 100644
index 0000000000000000000000000000000000000000..3145a381c436b58ba8f2e972e8b8722721d6da97
--- /dev/null
+++ b/robot_painting/qmupd_vs/tools/dynamic_draw_show.py
@@ -0,0 +1,41 @@
+
+import argparse
+import numpy as np
+import cv2
+import random
+
+def load_contours_list(filename):
+    contours_list = []
+    with open(filename, "r") as f:
+        for line in f:
+            points = line.strip().split(",")
+            # drop the trailing empty string left by a line ending with ','
+            if points[-1] == '':
+                points = points[:-1]
+            contour = []
+            for i in range(0, len(points), 2):
+                print(points[i], points[i+1])
+                x, y = int(float(points[i])), int(float(points[i+1]))
+                contour.append([[x, y]])
+            contours_list.append(contour)
+    return contours_list
+
+if __name__ == '__main__':
+    # parse arguments
+    parser = argparse.ArgumentParser(description='Draw contours on images.')
+    parser.add_argument('--filename', type=str, help='The filename of the image.')
+    args = parser.parse_args()
+    filename = args.filename
+    contours_list = load_contours_list(filename)
+    # draw
+    image = np.zeros((512, 512, 3), dtype=np.uint8) + 255
+    for contour in contours_list:
+        color = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
+        for i in range(len(contour)):
+            point = contour[i]
+            if i < len(contour) - 1:
+                cv2.line(image, tuple(contour[i][0]), tuple(contour[i+1][0]), color, 1)
+                # cv2.circle(image, tuple(point[0]), 1, (0, 255, 0), 1)
+            # cv2.line(image, tuple(contour[i]), tuple(contour[(i+1)%len(contour)]), (0, 255, 0), 1)
+        cv2.imshow('image', image)
+        cv2.waitKey(0)
\ No newline at end of file
diff --git a/robot_painting/qmupd_vs/tools/gif_making.py b/robot_painting/qmupd_vs/tools/gif_making.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1cf77f9333895dacf58bb669879bd20645f924a
--- /dev/null
+++ b/robot_painting/qmupd_vs/tools/gif_making.py
@@ -0,0 +1,209 @@
+import os
+import sys
+import argparse
+import numpy as np
+from PIL import Image
+import tensorflow as tf
+
+sys.path.append('./')
+from utils import draw, image_pasting_v3_testing
+from model_common_test import DiffPastingV3
+
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+
+def add_scaling_visualization(canvas_images, cursor, window_size, image_size):
+    """
+    :param canvas_images: (N, H, W, 3)
+    :param cursor:
+    :param window_size:
+    :param image_size:
+    :return:
+    """
+    cursor_pos = cursor * float(image_size)
+    cursor_x, cursor_y = int(round(cursor_pos[0])), int(round(cursor_pos[1]))  # in large size
+
+    vis_color = [255, 0, 0]
+    cursor_width = 3
+    box_width = 2
+
+    canvas_imgs = 255 - np.round(canvas_images * 255.0).astype(np.uint8)
+
+    # add cursor visualization
+    canvas_imgs[:, cursor_y - cursor_width: cursor_y + cursor_width, cursor_x - cursor_width: cursor_x + cursor_width, :] = vis_color
+
+    # add box visualization
+    up = max(0, cursor_y - window_size // 2)
+    down = min(image_size, cursor_y + window_size // 2)
+    left = max(0, cursor_x - window_size // 2)
+    right = min(image_size, cursor_x
+ window_size // 2) + # up = cursor_y - window_size // 2 + # down = cursor_y + window_size // 2 + # left = cursor_x - window_size // 2 + # right = cursor_x + window_size // 2 + + if up > 0: + canvas_imgs[:, up: up + box_width, left: right, :] = vis_color + if down < image_size: + canvas_imgs[:, down - box_width: down, left: right, :] = vis_color + if left > 0: + canvas_imgs[:, up: down, left: left + box_width, :] = vis_color + if right < image_size: + canvas_imgs[:, up: down, right - box_width: right, :] = vis_color + return canvas_imgs + + +def make_gif(sess, pasting_func, data, init_cursor, image_size, infer_lengths, init_width, + save_base, + cursor_type='next', min_window_size=32, raster_size=128, add_box=True): + """ + :param data: (N_strokes, 9): flag, x0, y0, x1, y1, x2, y2, r0, r2 + :return: + """ + canvas = np.zeros((image_size, image_size), dtype=np.float32) # [0.0-BG, 1.0-stroke] + gif_frames = [] + + cursor_idx = 0 + + if init_cursor.ndim == 1: + init_cursor = [init_cursor] + + for round_idx in range(len(infer_lengths)): + print('Making progress', round_idx + 1, '/', len(infer_lengths)) + round_length = infer_lengths[round_idx] + + cursor_pos = init_cursor[cursor_idx] # (2) + cursor_idx += 1 + + prev_width = init_width + prev_scaling = 1.0 + prev_window_size = float(raster_size) # (1) + + for round_inner_i in range(round_length): + stroke_idx = np.sum(infer_lengths[:round_idx]).astype(np.int32) + round_inner_i + + curr_window_size_raw = prev_scaling * prev_window_size + curr_window_size_raw = np.maximum(curr_window_size_raw, min_window_size) + curr_window_size_raw = np.minimum(curr_window_size_raw, image_size) + curr_window_size = int(round(curr_window_size_raw)) # () + + pen_state = data[stroke_idx, 0] + stroke_params = data[stroke_idx, 1:] # (8) + + x1y1, x2y2, width2, scaling2 = stroke_params[0:2], stroke_params[2:4], stroke_params[4], stroke_params[5] + x0y0 = np.zeros_like(x2y2) # (2), [-1.0, 1.0] + x0y0 = np.divide(np.add(x0y0, 1.0), 2.0) # (2), [0.0, 1.0] + x2y2 = np.divide(np.add(x2y2, 1.0), 2.0) # (2), [0.0, 1.0] + widths = np.stack([prev_width, width2], axis=0) # (2) + stroke_params_proc = np.concatenate([x0y0, x1y1, x2y2, widths], axis=-1) # (8) + + next_width = stroke_params[4] + next_scaling = stroke_params[5] + next_window_size = next_scaling * curr_window_size_raw + next_window_size = np.maximum(next_window_size, min_window_size) + next_window_size = np.minimum(next_window_size, image_size) + + prev_width = next_width * curr_window_size_raw / next_window_size + prev_scaling = next_scaling + prev_window_size = curr_window_size_raw + + f = stroke_params_proc.tolist() # (8) + f += [1.0, 1.0] + gt_stroke_img = draw(f) # (H, W), [0.0-stroke, 1.0-BG] + + gt_stroke_img_large = image_pasting_v3_testing(1.0 - gt_stroke_img, cursor_pos, + image_size, + curr_window_size_raw, + pasting_func, sess) # [0.0-BG, 1.0-stroke] + + if pen_state == 0: + canvas += gt_stroke_img_large # [0.0-BG, 1.0-stroke] + + canvas_rgb = np.stack([np.clip(canvas, 0.0, 1.0) for _ in range(3)], axis=-1) + + if add_box: + vis_inputs = np.expand_dims(canvas_rgb, axis=0) + vis_outputs = add_scaling_visualization(vis_inputs, cursor_pos, curr_window_size, image_size) + canvas_vis = vis_outputs[0] + else: + canvas_vis = canvas_rgb + + canvas_vis_png = Image.fromarray(canvas_vis, 'RGB') + gif_frames.append(canvas_vis_png) + + # update cursor_pos based on hps.cursor_type + new_cursor_offsets = stroke_params[2:4] * (float(curr_window_size_raw) / 2.0) # (1, 6), patch-level + new_cursor_offset_next = 
new_cursor_offsets + + # important!!! + new_cursor_offset_next = np.concatenate([new_cursor_offset_next[1:2], new_cursor_offset_next[0:1]], axis=-1) + + cursor_pos_large = cursor_pos * float(image_size) + + stroke_position_next = cursor_pos_large + new_cursor_offset_next # (2), large-level + + if cursor_type == 'next': + cursor_pos_large = stroke_position_next # (2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), float(image_size - 1)) # (2), large-level + cursor_pos = cursor_pos_large / float(image_size) + + print('Saving to GIF ...') + save_path = os.path.join(save_base, 'dynamic.gif') + first_frame = gif_frames[0] + first_frame.save(save_path, save_all=True, append_images=gif_frames, loop=0, duration=0.01) + + +def gif_making(npz_path): + assert npz_path != '' + + min_window_size = 32 + raster_size = 128 + + split_idx = npz_path.rfind('/') + if split_idx == -1: + file_base = './' + file_name = npz_path[:-4] + else: + file_base = npz_path[:npz_path.rfind('/')] + file_name = npz_path[npz_path.rfind('/') + 1: -4] + + gif_base = os.path.join(file_base, file_name) + os.makedirs(gif_base, exist_ok=True) + + # differentiable pasting graph + paste_v3_func = DiffPastingV3(raster_size) + + tfconfig = tf.ConfigProto() + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + data = np.load(npz_path, encoding='latin1', allow_pickle=True) + strokes_data = data['strokes_data'] + init_cursors = data['init_cursors'] + image_size = data['image_size'] + round_length = data['round_length'] + init_width = data['init_width'] + + if round_length.ndim == 0: + round_lengths = [round_length] + else: + round_lengths = round_length + + # print('round_lengths', round_lengths) + + make_gif(sess, paste_v3_func, + strokes_data, init_cursors, image_size, round_lengths, init_width, + gif_base, + min_window_size=min_window_size, raster_size=raster_size) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--file', '-f', type=str, default='', help="define a npz path") + args = parser.parse_args() + + gif_making(args.file) diff --git a/robot_painting/qmupd_vs/tools/svg_conversion.py b/robot_painting/qmupd_vs/tools/svg_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..ddeea7a14ece96a70419181897d78ee5ecd2aa0e --- /dev/null +++ b/robot_painting/qmupd_vs/tools/svg_conversion.py @@ -0,0 +1,242 @@ +import os +import argparse +import numpy as np +from xml.dom import minidom + + +def write_svg_1(path_list, img_size, save_path): + ''' A long curve consisting of several strokes forms a path. 
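+    Each group of consecutively drawn strokes becomes a single <path>: an absolute
+    moveto ("M x, y") at the group's first point followed by one quadratic Bezier
+    ("Q cx, cy, x, y") per stroke. The whole path uses a fixed stroke-width of 3.5,
+    so per-stroke width variation is not preserved in this variant.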
''' + impl = minidom.getDOMImplementation() + + doc = impl.createDocument(None, None, None) + + rootElement = doc.createElement('svg') + rootElement.setAttribute('xmlns', 'http://www.w3.org/2000/svg') + rootElement.setAttribute('height', str(img_size)) + rootElement.setAttribute('width', str(img_size)) + + path_num = len(path_list) + for path_i in range(path_num): + path_items = path_list[path_i] + + assert len(path_items) > 0 + if len(path_items) == 1: + continue + + childElement = doc.createElement('path') + childElement.setAttribute('id', 'curve_' + str(path_i)) + childElement.setAttribute('stroke', '#000000') + childElement.setAttribute('stroke-width', '3.5') + childElement.setAttribute('stroke-linejoin', 'round') + childElement.setAttribute('stroke-linecap', 'round') + childElement.setAttribute('fill', 'none') + + command_str = '' + for stroke_i, stroke_item in enumerate(path_items): + if stroke_i == 0: + command_str += 'M ' + stroke_position = stroke_item[0] + command_str += str(stroke_position[0]) + ', ' + str(stroke_position[1]) + ' ' + else: + command_str += 'Q ' + ctrl_position, stroke_position, stroke_width = stroke_item[0], stroke_item[1], stroke_item[2] + + ctrl_position_0 = last_position[0] + (stroke_position[0] - last_position[0]) * ctrl_position[1] + ctrl_position_1 = last_position[1] + (stroke_position[1] - last_position[1]) * ctrl_position[0] + + command_str += str(ctrl_position_0) + ', ' + str(ctrl_position_1) + ', ' + \ + str(stroke_position[0]) + ', ' + str(stroke_position[1]) + ' ' + + last_position = stroke_position + + childElement.setAttribute('d', command_str) + rootElement.appendChild(childElement) + + doc.appendChild(rootElement) + + f = open(save_path, 'w') + doc.writexml(f, addindent=' ', newl='\n') + f.close() + + +def write_svg_2(path_list, img_size, save_path): + ''' A single stroke forms a path. 
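+    Each quadratic segment is emitted as its own <path> element, so every stroke
+    keeps its own width (stroke_width scaled by img_size / 1.66).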
''' + impl = minidom.getDOMImplementation() + + doc = impl.createDocument(None, None, None) + + rootElement = doc.createElement('svg') + rootElement.setAttribute('xmlns', 'http://www.w3.org/2000/svg') + rootElement.setAttribute('height', str(img_size)) + rootElement.setAttribute('width', str(img_size)) + + path_num = len(path_list) + for path_i in range(path_num): + path_items = path_list[path_i] + + assert len(path_items) > 0 + if len(path_items) == 1: + continue + + for stroke_i, stroke_item in enumerate(path_items): + if stroke_i == 0: + last_position = stroke_item[0] + else: + childElement = doc.createElement('path') + childElement.setAttribute('id', 'curve_' + str(path_i)) + childElement.setAttribute('stroke', '#000000') + childElement.setAttribute('stroke-linejoin', 'round') + childElement.setAttribute('stroke-linecap', 'round') + childElement.setAttribute('fill', 'none') + + command_str = 'M ' + str(last_position[0]) + ', ' + str(last_position[1]) + ' ' + command_str += 'Q ' + + ctrl_position, stroke_position, stroke_width = stroke_item[0], stroke_item[1], stroke_item[2] + + ctrl_position_0 = last_position[0] + (stroke_position[0] - last_position[0]) * ctrl_position[1] + ctrl_position_1 = last_position[1] + (stroke_position[1] - last_position[1]) * ctrl_position[0] + + command_str += str(ctrl_position_0) + ', ' + str(ctrl_position_1) + ', ' + \ + str(stroke_position[0]) + ', ' + str(stroke_position[1]) + ' ' + + last_position = stroke_position + + childElement.setAttribute('d', command_str) + childElement.setAttribute('stroke-width', str(stroke_width * img_size / 1.66)) + rootElement.appendChild(childElement) + + doc.appendChild(rootElement) + + f = open(save_path, 'w') + doc.writexml(f, addindent=' ', newl='\n') + f.close() + + +def convert_strokes_to_svg(data, init_cursor, image_size, infer_lengths, init_width, save_path, svg_type, + cursor_type='next', min_window_size=32, raster_size=128): + """ + :param data: (N_strokes, 7): flag, x_c, y_c, dx, dy, r, ds + :return: + """ + cursor_idx = 0 + + absolute_strokes = [] + absolute_strokes_path = [] + + if init_cursor.ndim == 1: + init_cursor = [init_cursor] + + for round_idx in range(len(infer_lengths)): + round_length = infer_lengths[round_idx] + + cursor_pos = init_cursor[cursor_idx] # (2) + cursor_idx += 1 + + cursor_pos_large = cursor_pos * float(image_size) + + if len(absolute_strokes_path) > 0: + absolute_strokes.append(absolute_strokes_path) + absolute_strokes_path = [[cursor_pos_large]] + + prev_width = init_width + prev_scaling = 1.0 + prev_window_size = float(raster_size) # (1) + + for round_inner_i in range(round_length): + stroke_idx = np.sum(infer_lengths[:round_idx]).astype(np.int32) + round_inner_i + + curr_window_size_raw = prev_scaling * prev_window_size + curr_window_size_raw = np.maximum(curr_window_size_raw, min_window_size) + curr_window_size_raw = np.minimum(curr_window_size_raw, image_size) + # curr_window_size = int(round(curr_window_size_raw)) # () + + stroke_params = data[stroke_idx, 1:] # (6) + pen_state = data[stroke_idx, 0] + + next_width = stroke_params[4] + next_scaling = stroke_params[5] + + next_width_abs = next_width * curr_window_size_raw / float(image_size) + + prev_scaling = next_scaling + prev_window_size = curr_window_size_raw + + # update cursor_pos based on hps.cursor_type + new_cursor_offsets = stroke_params[2:4] * (float(curr_window_size_raw) / 2.0) # (1, 6), patch-level + new_cursor_offset_next = new_cursor_offsets + + # important!!! 
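+            # same component swap as in the sampling scripts: stroke offsets and
+            # cursor positions use opposite x/y orderings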
+ new_cursor_offset_next = np.concatenate([new_cursor_offset_next[1:2], new_cursor_offset_next[0:1]], axis=-1) + cursor_pos_large = cursor_pos * float(image_size) + stroke_position_next = cursor_pos_large + new_cursor_offset_next # (2), large-level + + if pen_state == 0: + absolute_strokes_path.append([stroke_params[0:2], stroke_position_next, next_width_abs]) + else: + absolute_strokes.append(absolute_strokes_path) + absolute_strokes_path = [[stroke_position_next]] + + if cursor_type == 'next': + cursor_pos_large = stroke_position_next # (2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), float(image_size - 1)) # (2), large-level + cursor_pos = cursor_pos_large / float(image_size) + + absolute_strokes.append(absolute_strokes_path) + + if svg_type == 'cluster': + write_svg_1(absolute_strokes, image_size, save_path) + elif svg_type == 'single': + write_svg_2(absolute_strokes, image_size, save_path) + else: + raise Exception('Unknown svg_type', svg_type) + + +def data_convert_to_absolute(npz_path, svg_type): + assert npz_path != '' + assert svg_type in ['single', 'cluster'] + + min_window_size = 32 + raster_size = 128 + + split_idx = npz_path.rfind('/') + if split_idx == -1: + file_base = './' + file_name = npz_path[:-4] + else: + file_base = npz_path[:npz_path.rfind('/')] + file_name = npz_path[npz_path.rfind('/') + 1: -4] + + svg_data_base = os.path.join(file_base, file_name) + os.makedirs(svg_data_base, exist_ok=True) + + data = np.load(npz_path, encoding='latin1', allow_pickle=True) + strokes_data = data['strokes_data'] + init_cursors = data['init_cursors'] + image_size = data['image_size'] + round_length = data['round_length'] + init_width = data['init_width'] + + if round_length.ndim == 0: + round_lengths = [round_length] + else: + round_lengths = round_length + + save_path = os.path.join(svg_data_base, str(svg_type) + '.svg') + + convert_strokes_to_svg(strokes_data, init_cursors, image_size, round_lengths, init_width, + min_window_size=min_window_size, raster_size=raster_size, save_path=save_path, + svg_type=svg_type) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--file', '-f', type=str, default='', help="define a npz path") + parser.add_argument('--svg_type', '-st', type=str, choices=['single', 'cluster'], default='single', + help="svg type") + args = parser.parse_args() + + data_convert_to_absolute(args.file, args.svg_type) diff --git a/robot_painting/qmupd_vs/tools/visualize_drawing.py b/robot_painting/qmupd_vs/tools/visualize_drawing.py new file mode 100644 index 0000000000000000000000000000000000000000..2cf0add4b574e20b83583edfe4f72365b724c4a7 --- /dev/null +++ b/robot_painting/qmupd_vs/tools/visualize_drawing.py @@ -0,0 +1,223 @@ +import os +import sys +import argparse +import numpy as np +from PIL import Image +import tensorflow as tf + +sys.path.append('./') +from utils import get_colors, draw, image_pasting_v3_testing +from model_common_test import DiffPastingV3 + +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + + +def display_strokes_final(sess, pasting_func, data, init_cursor, image_size, infer_lengths, init_width, + save_base, + cursor_type='next', min_window_size=32, raster_size=128): + """ + :param data: (N_strokes, 9): flag, x0, y0, x1, y1, x2, y2, r0, r2 + :return: + """ + canvas = np.zeros((image_size, image_size), dtype=np.float32) # [0.0-BG, 1.0-stroke] + drawn_region = np.zeros_like(canvas) + overlap_region = np.zeros_like(canvas) + 
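+    # three colorized renderings are accumulated in parallel:
+    #   canvas_color_with_overlap - every drawn stroke, one color per stroke
+    #   canvas_color_wo_overlap   - drawn strokes except those overlapping >50%
+    #                               with already-drawn regions
+    #   canvas_color_with_moving  - all strokes, including pen-up moving segments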
canvas_color_with_overlap = np.zeros((image_size, image_size, 3), dtype=np.float32) + canvas_color_wo_overlap = np.zeros((image_size, image_size, 3), dtype=np.float32) + canvas_color_with_moving = np.zeros((image_size, image_size, 3), dtype=np.float32) + + cursor_idx = 0 + + if init_cursor.ndim == 1: + init_cursor = [init_cursor] + + stroke_count = len(data) + color_rgb_set = get_colors(stroke_count) # list of (3,) in [0, 255] + color_idx = 0 + + valid_stroke_count = stroke_count - np.sum(data[:, 0]).astype(np.int32) + len(init_cursor) + valid_color_rgb_set = get_colors(valid_stroke_count) # list of (3,) in [0, 255] + valid_color_idx = -1 + + # print('Drawn stroke number', valid_stroke_count) + # print(' flag x1\t\t y1\t\t x2\t\t y2\t\t r2\t\t s2') + + for round_idx in range(len(infer_lengths)): + round_length = infer_lengths[round_idx] + + cursor_pos = init_cursor[cursor_idx] # (2) + cursor_idx += 1 + + prev_width = init_width + prev_scaling = 1.0 + prev_window_size = float(raster_size) # (1) + + for round_inner_i in range(round_length): + stroke_idx = np.sum(infer_lengths[:round_idx]).astype(np.int32) + round_inner_i + + curr_window_size_raw = prev_scaling * prev_window_size + curr_window_size_raw = np.maximum(curr_window_size_raw, min_window_size) + curr_window_size_raw = np.minimum(curr_window_size_raw, image_size) + + pen_state = data[stroke_idx, 0] + stroke_params = data[stroke_idx, 1:] # (8) + + x1y1, x2y2, width2, scaling2 = stroke_params[0:2], stroke_params[2:4], stroke_params[4], stroke_params[5] + x0y0 = np.zeros_like(x2y2) # (2), [-1.0, 1.0] + x0y0 = np.divide(np.add(x0y0, 1.0), 2.0) # (2), [0.0, 1.0] + x2y2 = np.divide(np.add(x2y2, 1.0), 2.0) # (2), [0.0, 1.0] + widths = np.stack([prev_width, width2], axis=0) # (2) + stroke_params_proc = np.concatenate([x0y0, x1y1, x2y2, widths], axis=-1) # (8) + + next_width = stroke_params[4] + next_scaling = stroke_params[5] + next_window_size = next_scaling * curr_window_size_raw + next_window_size = np.maximum(next_window_size, min_window_size) + next_window_size = np.minimum(next_window_size, image_size) + + prev_width = next_width * curr_window_size_raw / next_window_size + prev_scaling = next_scaling + prev_window_size = curr_window_size_raw + + f = stroke_params_proc.tolist() # (8) + f += [1.0, 1.0] + gt_stroke_img = draw(f) # (H, W), [0.0-stroke, 1.0-BG] + + gt_stroke_img_large = image_pasting_v3_testing(1.0 - gt_stroke_img, cursor_pos, + image_size, + curr_window_size_raw, + pasting_func, sess) # [0.0-BG, 1.0-stroke] + + is_overlap = False + + if pen_state == 0: + canvas += gt_stroke_img_large # [0.0-BG, 1.0-stroke] + + curr_drawn_stroke_region = np.zeros_like(gt_stroke_img_large) + curr_drawn_stroke_region[gt_stroke_img_large > 0.5] = 1 + intersection = drawn_region * curr_drawn_stroke_region + # regard stroke with >50% overlap area as overlaped stroke + if np.sum(intersection) / np.sum(curr_drawn_stroke_region) > 0.5: + # enlarge the stroke a bit for better visualization + overlap_region[gt_stroke_img_large > 0] += 1 + is_overlap = True + + drawn_region[gt_stroke_img_large > 0.5] = 1 + + color_rgb = color_rgb_set[color_idx] # (3) in [0, 255] + color_idx += 1 + + color_rgb = np.reshape(color_rgb, (1, 1, 3)).astype(np.float32) + color_stroke = np.expand_dims(gt_stroke_img_large, axis=-1) * (1.0 - color_rgb / 255.0) + canvas_color_with_moving = canvas_color_with_moving * np.expand_dims((1.0 - gt_stroke_img_large), + axis=-1) + color_stroke # (H, W, 3) + + if pen_state == 0: + valid_color_idx += 1 + + if pen_state == 0: + 
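+                # drawn strokes take their colors from the smaller "valid" palette,
+                # shared by the with-overlap and without-overlap renderings so the
+                # two outputs stay color-consistent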
valid_color_rgb = valid_color_rgb_set[valid_color_idx] # (3) in [0, 255] + # valid_color_idx += 1 + + valid_color_rgb = np.reshape(valid_color_rgb, (1, 1, 3)).astype(np.float32) + valid_color_stroke = np.expand_dims(gt_stroke_img_large, axis=-1) * (1.0 - valid_color_rgb / 255.0) + canvas_color_with_overlap = canvas_color_with_overlap * np.expand_dims((1.0 - gt_stroke_img_large), + axis=-1) + valid_color_stroke # (H, W, 3) + if not is_overlap: + canvas_color_wo_overlap = canvas_color_wo_overlap * np.expand_dims((1.0 - gt_stroke_img_large), + axis=-1) + valid_color_stroke # (H, W, 3) + + # update cursor_pos based on hps.cursor_type + new_cursor_offsets = stroke_params[2:4] * (float(curr_window_size_raw) / 2.0) # (1, 6), patch-level + new_cursor_offset_next = new_cursor_offsets + + # important!!! + new_cursor_offset_next = np.concatenate([new_cursor_offset_next[1:2], new_cursor_offset_next[0:1]], axis=-1) + + cursor_pos_large = cursor_pos * float(image_size) + + stroke_position_next = cursor_pos_large + new_cursor_offset_next # (2), large-level + + if cursor_type == 'next': + cursor_pos_large = stroke_position_next # (2), large-level + else: + raise Exception('Unknown cursor_type') + + cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), float(image_size - 1)) # (2), large-level + cursor_pos = cursor_pos_large / float(image_size) + + canvas_rgb = np.stack([np.clip(canvas, 0.0, 1.0) for _ in range(3)], axis=-1) + canvas_black = 255 - np.round(canvas_rgb * 255.0).astype(np.uint8) + canvas_color_with_overlap = 255 - np.round(canvas_color_with_overlap * 255.0).astype(np.uint8) + canvas_color_wo_overlap = 255 - np.round(canvas_color_wo_overlap * 255.0).astype(np.uint8) + canvas_color_with_moving = 255 - np.round(canvas_color_with_moving * 255.0).astype(np.uint8) + + canvas_black_png = Image.fromarray(canvas_black, 'RGB') + canvas_black_save_path = os.path.join(save_base, 'output_rendered.png') + canvas_black_png.save(canvas_black_save_path, 'PNG') + + canvas_color_png = Image.fromarray(canvas_color_with_overlap, 'RGB') + canvas_color_save_path = os.path.join(save_base, 'output_order_with_overlap.png') + canvas_color_png.save(canvas_color_save_path, 'PNG') + + canvas_color_wo_png = Image.fromarray(canvas_color_wo_overlap, 'RGB') + canvas_color_wo_save_path = os.path.join(save_base, 'output_order_wo_overlap.png') + canvas_color_wo_png.save(canvas_color_wo_save_path, 'PNG') + + canvas_color_m_png = Image.fromarray(canvas_color_with_moving, 'RGB') + canvas_color_m_save_path = os.path.join(save_base, 'output_order_with_moving.png') + canvas_color_m_png.save(canvas_color_m_save_path, 'PNG') + + +def visualize_drawing(npz_path): + assert npz_path != '' + + min_window_size = 32 + raster_size = 128 + + split_idx = npz_path.rfind('/') + if split_idx == -1: + file_base = './' + file_name = npz_path[:-4] + else: + file_base = npz_path[:npz_path.rfind('/')] + file_name = npz_path[npz_path.rfind('/') + 1: -4] + + regenerate_base = os.path.join(file_base, file_name) + os.makedirs(regenerate_base, exist_ok=True) + + # differentiable pasting graph + paste_v3_func = DiffPastingV3(raster_size) + + tfconfig = tf.ConfigProto() + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + data = np.load(npz_path, encoding='latin1', allow_pickle=True) + strokes_data = data['strokes_data'] + init_cursors = data['init_cursors'] + image_size = data['image_size'] + round_length = data['round_length'] + init_width = data['init_width'] 
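+    # These fields are the ones written by save_seq_data() in utils.py:
+    # strokes_data (N_strokes, 9), per-round init_cursors, the canvas
+    # image_size, per-round stroke counts in round_length, and init_width.
+    # Example invocation (path illustrative):
+    #   python3 tools/visualize_drawing.py --file outputs/sample.npz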
+ + if round_length.ndim == 0: + round_lengths = [round_length] + else: + round_lengths = round_length + + # print('round_lengths', round_lengths) + + print('Processing ...') + display_strokes_final(sess, paste_v3_func, + strokes_data, init_cursors, image_size, round_lengths, init_width, + regenerate_base, + min_window_size=min_window_size, raster_size=raster_size) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--file', '-f', type=str, default='', help="define a npz path") + args = parser.parse_args() + + visualize_drawing(args.file) diff --git a/robot_painting/qmupd_vs/train.py b/robot_painting/qmupd_vs/train.py new file mode 100644 index 0000000000000000000000000000000000000000..e00cb21fa7edaccd0ce8249b00831adf67ee4ed9 --- /dev/null +++ b/robot_painting/qmupd_vs/train.py @@ -0,0 +1,87 @@ +"""General-purpose training script for image-to-image translation. + +This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and +different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization). +You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model'). + +It first creates model, dataset, and visualizer given the option. +It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models. +The script supports continue/resume training. Use '--continue_train' to resume your previous training. + +Example: + Train a CycleGAN model: + python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan + Train a pix2pix model: + python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA + +See options/base_options.py and options/train_options.py for more training options. +See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md +See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md +""" +import time +from options.train_options import TrainOptions +from data import create_dataset +from models import create_model +from util.visualizer import Visualizer +import pdb + +if __name__ == '__main__': + start = time.time() + opt = TrainOptions().parse() # get training options + dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options + dataset_size = len(dataset) # get the number of images in the dataset. 
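+    # Note: len(dataset) counts individual images, even though iterating over
+    # `dataset` below yields batches of size opt.batch_size.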
+    print('The number of training images = %d' % dataset_size)
+
+    model = create_model(opt)      # create a model given opt.model and other options
+    model.setup(opt)               # regular setup: load and print networks; create schedulers
+    visualizer = Visualizer(opt)   # create a visualizer that displays/saves images and plots
+    total_iters = 0                # the total number of training iterations
+
+    #for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
+
+    for epoch in range(opt.epoch_count, opt.niter_end + 1):
+        epoch_start_time = time.time()  # timer for entire epoch
+        iter_data_time = time.time()    # timer for data loading per iteration
+        epoch_iter = 0                  # the number of training iterations in current epoch, reset to 0 every epoch
+        model.update_process(epoch)
+
+        for i, data in enumerate(dataset):  # inner loop within one epoch
+            iter_start_time = time.time()  # timer for computation per iteration
+            if total_iters % opt.print_freq == 0:
+                t_data = iter_start_time - iter_data_time
+            visualizer.reset()
+            total_iters += opt.batch_size
+            epoch_iter += opt.batch_size
+            model.set_input(data)         # unpack data from dataset and apply preprocessing
+            model.optimize_parameters()   # calculate loss functions, get gradients, update network weights
+
+            if total_iters % opt.display_freq == 0:   # display images on visdom and save images to an HTML file
+                save_result = total_iters % opt.update_html_freq == 0
+                model.compute_visuals()
+                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
+
+            if total_iters % opt.print_freq == 0:    # print training losses and save logging information to the disk
+                losses = model.get_current_losses()
+                t_comp = (time.time() - iter_start_time) / opt.batch_size
+                if opt.model == 'cycle_gan':
+                    processes = [model.process] + model.lambda_As
+                    visualizer.print_current_losses_process(epoch, epoch_iter, losses, t_comp, t_data, processes)
+                else:
+                    visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
+                if opt.display_id > 0:
+                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
+
+            if total_iters % opt.save_latest_freq == 0:   # cache our latest model every <save_latest_freq> iterations
+                print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
+                save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
+                model.save_networks(save_suffix)
+
+            iter_data_time = time.time()
+        if epoch % opt.save_epoch_freq == 0:              # cache our model every <save_epoch_freq> epochs
+            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
+            model.save_networks('latest')
+            model.save_networks(epoch)
+
+        print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.niter_end, time.time() - epoch_start_time))
+        model.update_learning_rate()                     # update learning rates at the end of every epoch.
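+        # Learning rates are stepped once per epoch, after checkpointing,
+        # through the schedulers created in model.setup(opt).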
+ + print('Total Time Taken: %d sec' % (time.time() - start)) \ No newline at end of file diff --git a/robot_painting/qmupd_vs/train_rough_photograph.py b/robot_painting/qmupd_vs/train_rough_photograph.py new file mode 100644 index 0000000000000000000000000000000000000000..6b59b94dba436ae0b3dc3feca036ca6f61129864 --- /dev/null +++ b/robot_painting/qmupd_vs/train_rough_photograph.py @@ -0,0 +1,342 @@ +import json +import os +import time +import numpy as np +import six +import tensorflow as tf +from PIL import Image +import argparse + +import model_common_train as sketch_image_model +from hyper_parameters import FLAGS, get_default_hparams_rough, get_default_hparams_normal +from utils import create_summary, save_model, reset_graph, load_checkpoint +from dataset_utils import load_dataset_training + +os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1' + +tf.logging.set_verbosity(tf.logging.INFO) + + +def should_save_log_img(step_): + if step_ % 500 == 0: + return True + else: + return False + + +def save_log_images(sess, model, data_set, save_root, step_num, curr_photo_prob, interpolate_type, save_num=10): + res_gap = (model.hps.image_size_large - model.hps.image_size_small) // (save_num - 1) + log_img_resolutions = [] + for ii in range(save_num - 1): + log_img_resolutions.append(model.hps.image_size_small + ii * res_gap) + log_img_resolutions.append(model.hps.image_size_large) + + for res_i in range(len(log_img_resolutions)): + resolution = log_img_resolutions[res_i] + + sub_save_root = os.path.join(save_root, 'res_' + str(resolution)) + os.makedirs(sub_save_root, exist_ok=True) + + input_photos, target_sketches, init_cursors, image_size_rand = \ + data_set.get_batch_from_memory(memory_idx=res_i, + fixed_image_size=resolution, + random_cursor=model.hps.random_cursor, + photo_prob=curr_photo_prob, + interpolate_type=interpolate_type) + # input_photos: (N, image_size, image_size, 3), [0-stroke, 1-BG] + # target_sketches: (N, image_size, image_size), [0-stroke, 1-BG] + # init_cursors: (N, 1, 2), in size [0.0, 1.0) + + input_photo_val = input_photos + + init_cursor_input = [init_cursors for _ in range(model.total_loop)] + init_cursor_input = np.concatenate(init_cursor_input, axis=0) + image_size_input = [image_size_rand for _ in range(model.total_loop)] + image_size_input = np.stack(image_size_input, axis=0) + + feed = { + model.init_cursor: init_cursor_input, + model.image_size: image_size_input, + model.init_width: [model.hps.min_width], + } + for loop_i in range(model.total_loop): + feed[model.input_photo_list[loop_i]] = input_photo_val + + raster_images_pred, raster_images_pred_rgb = sess.run([model.pred_raster_imgs, model.pred_raster_imgs_rgb], + feed) # (N, image_size, image_size), [0.0-stroke, 1.0-BG] + raster_images_pred = (np.array(raster_images_pred[0]) * 255.0).astype(np.uint8) + input_photo = (np.array(input_photo_val[0, :, :, :]) * 255.0).astype(np.uint8) + target_sketch = (np.array(target_sketches[0]) * 255.0).astype(np.uint8) + raster_images_pred_rgb = (np.array(raster_images_pred_rgb[0]) * 255.0).astype(np.uint8) + + pred_save_path = os.path.join(sub_save_root, str(step_num) + '.png') + input_save_path = os.path.join(sub_save_root, 'input.png') + target_save_path = os.path.join(sub_save_root, 'gt.png') + + pred_rgb_save_root = os.path.join(sub_save_root, 'rgb') + os.makedirs(pred_rgb_save_root, exist_ok=True) + pred_rgb_save_path = os.path.join(pred_rgb_save_root, str(step_num) + '.png') + + raster_images_pred = Image.fromarray(raster_images_pred, 'L') + 
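# Each resolution keeps a fixed input.png / gt.png pair for reference, while
+        # per-step predictions accumulate as <step>.png and rgb/<step>.png.
+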
raster_images_pred.save(pred_save_path, 'PNG') + input_photo = Image.fromarray(input_photo, 'RGB') + input_photo.save(input_save_path, 'PNG') + target_sketch = Image.fromarray(target_sketch, 'L') + target_sketch.save(target_save_path, 'PNG') + raster_images_pred_rgb = Image.fromarray(raster_images_pred_rgb, 'RGB') + raster_images_pred_rgb.save(pred_rgb_save_path, 'PNG') + + +def train(sess, train_model, eval_sample_model, train_set, valid_set, sub_log_root, sub_snapshot_root, sub_log_img_root): + # Setup summary writer. + summary_writer = tf.summary.FileWriter(sub_log_root) + + print('-' * 100) + + # Calculate trainable params. + t_vars = tf.trainable_variables() + count_t_vars = 0 + for var in t_vars: + num_param = np.prod(var.get_shape().as_list()) + count_t_vars += num_param + print('%s | shape: %s | num_param: %i' % (var.name, str(var.get_shape()), num_param)) + print('Total trainable variables %i.' % count_t_vars) + print('-' * 100) + + # main train loop + + hps = train_model.hps + start = time.time() + + # create saver + snapshot_save_vars = [var for var in tf.global_variables() + if 'raster_unit' not in var.op.name and 'VGG16' not in var.op.name] + saver = tf.train.Saver(var_list=snapshot_save_vars, max_to_keep=20) + + start_step = 1 + print('start_step', start_step) + + mean_perc_relu_losses = [0.0 for _ in range(len(hps.perc_loss_layers))] + + for _ in range(start_step, hps.num_steps + 1): + step = sess.run(train_model.global_step) # start from 0 + + count_step = min(step, hps.num_steps) + curr_learning_rate = ((hps.learning_rate - hps.min_learning_rate) * + (1 - count_step / hps.num_steps) ** hps.decay_power + hps.min_learning_rate) + + if hps.sn_loss_type == 'decreasing': + assert hps.decrease_stop_steps <= hps.num_steps + assert hps.stroke_num_loss_weight_end <= hps.stroke_num_loss_weight + curr_sn_k = (hps.stroke_num_loss_weight - hps.stroke_num_loss_weight_end) / float(hps.decrease_stop_steps) + curr_stroke_num_loss_weight = hps.stroke_num_loss_weight - count_step * curr_sn_k + curr_stroke_num_loss_weight = max(curr_stroke_num_loss_weight, hps.stroke_num_loss_weight_end) + elif hps.sn_loss_type == 'fixed': + curr_stroke_num_loss_weight = hps.stroke_num_loss_weight + elif hps.sn_loss_type == 'increasing': + curr_sn_k = hps.stroke_num_loss_weight / float(hps.num_steps - hps.increase_start_steps) + curr_stroke_num_loss_weight = max(count_step - hps.increase_start_steps, 0) * curr_sn_k + else: + raise Exception('Unknown sn_loss_type', hps.sn_loss_type) + + if hps.early_pen_loss_type == 'head': + curr_early_pen_k = (hps.max_seq_len - hps.early_pen_length) / float(hps.num_steps) + curr_early_pen_loss_len = count_step * curr_early_pen_k + hps.early_pen_length + + curr_early_pen_loss_start = 1 + curr_early_pen_loss_end = curr_early_pen_loss_len + elif hps.early_pen_loss_type == 'tail': + curr_early_pen_k = (hps.max_seq_len // 2 - 1) / float(hps.num_steps) + curr_early_pen_loss_len = count_step * curr_early_pen_k + hps.max_seq_len // 2 + + curr_early_pen_loss_end = hps.max_seq_len + curr_early_pen_loss_start = curr_early_pen_loss_end - curr_early_pen_loss_len + elif hps.early_pen_loss_type == 'move': + curr_early_pen_k = (hps.max_seq_len // 2 - 1) / float(hps.num_steps) + curr_early_pen_loss_len = count_step * curr_early_pen_k + hps.max_seq_len // 2 + + curr_early_pen_loss_start = hps.max_seq_len - curr_early_pen_loss_len + curr_early_pen_loss_end = curr_early_pen_loss_start + hps.max_seq_len // 2 + else: + raise Exception('Unknown early_pen_loss_type', hps.early_pen_loss_type) + 
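# All three schedules are linear in the step count: 'head' grows a penalised
+        # prefix from early_pen_length, 'tail' grows a suffix pinned at
+        # max_seq_len, and 'move' slides a fixed half-sequence window from the
+        # tail towards the head.
+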
curr_early_pen_loss_start = int(round(curr_early_pen_loss_start)) + curr_early_pen_loss_end = int(round(curr_early_pen_loss_end)) + + if hps.photo_prob_type == 'increasing' or hps.photo_prob_type == 'interpolate': + assert hps.photo_prob_end_step >= hps.photo_prob_start_step + curr_photo_prob_k = 1.0 / float(hps.photo_prob_end_step - hps.photo_prob_start_step) + curr_photo_prob = (count_step - hps.photo_prob_start_step) * curr_photo_prob_k + curr_photo_prob = max(0.0, curr_photo_prob) + curr_photo_prob = min(1.0, curr_photo_prob) + interpolate_type = 'prob' if hps.photo_prob_type == 'increasing' else 'image' + elif hps.photo_prob_type == 'zero': + curr_photo_prob = 0.0 + interpolate_type = 'prob' + elif hps.photo_prob_type == 'one': + curr_photo_prob = 1.0 + interpolate_type = 'prob' + else: + raise Exception('Unknown photo_prob_type', hps.photo_prob_type) + + input_photos, target_sketches, init_cursors, image_sizes = \ + train_set.get_batch_multi_res(loop_num=train_model.total_loop, + random_cursor=hps.random_cursor, + photo_prob=curr_photo_prob, + interpolate_type=interpolate_type) + # input_photos: list of (N, image_size, image_size, 3), [0-stroke, 1-BG] + # target_sketches: list of (N, image_size, image_size), [0-stroke, 1-BG] + # init_cursors: list of (N, 1, 2), in size [0.0, 1.0) + + init_cursors_input = np.concatenate(init_cursors, axis=0) + image_size_input = np.stack(image_sizes, axis=0) + + feed = { + train_model.init_cursor: init_cursors_input, + train_model.image_size: image_size_input, + train_model.init_width: [hps.min_width], + + train_model.lr: curr_learning_rate, + train_model.stroke_num_loss_weight: curr_stroke_num_loss_weight, + train_model.early_pen_loss_start_idx: curr_early_pen_loss_start, + train_model.early_pen_loss_end_idx: curr_early_pen_loss_end, + + train_model.last_step_num: float(step), + } + for layer_i in range(len(hps.perc_loss_layers)): + feed[train_model.perc_loss_mean_list[layer_i]] = mean_perc_relu_losses[layer_i] + + for loop_i in range(train_model.total_loop): + input_photo_val = input_photos[loop_i] + target_sketch_val = target_sketches[loop_i] + feed[train_model.input_photo_list[loop_i]] = input_photo_val + feed[train_model.target_sketch_list[loop_i]] = np.expand_dims(target_sketch_val, axis=-1) + + (train_cost, raster_cost, perc_relu_costs_raw, perc_relu_costs_norm, + stroke_num_cost, early_pen_states_cost, + pos_outside_cost, win_size_outside_cost, + train_step) = sess.run([ + train_model.cost, train_model.raster_cost, + train_model.perc_relu_losses_raw, train_model.perc_relu_losses_norm, + train_model.stroke_num_cost, + train_model.early_pen_states_cost, + train_model.pos_outside_cost, train_model.win_size_outside_cost, + train_model.global_step + ], feed) + + ## update mean_raster_loss + for layer_i in range(len(hps.perc_loss_layers)): + perc_relu_cost_raw = perc_relu_costs_raw[layer_i] + mean_perc_relu_loss = mean_perc_relu_losses[layer_i] + mean_perc_relu_loss = (mean_perc_relu_loss * step + perc_relu_cost_raw) / float(step + 1) + mean_perc_relu_losses[layer_i] = mean_perc_relu_loss + + _ = sess.run(train_model.train_op, feed) + + if step % 20 == 0 and step > 0: + end = time.time() + time_taken = end - start + + train_summary_map = { + 'Train_Cost': train_cost, + 'Train_raster_Cost': raster_cost, + 'Train_stroke_num_Cost': stroke_num_cost, + 'Train_early_pen_states_cost': early_pen_states_cost, + 'Train_pos_outside_Cost': pos_outside_cost, + 'Train_win_size_outside_Cost': win_size_outside_cost, + 'Learning_Rate': curr_learning_rate, + 
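# (these scalars are written to the TF event log by create_summary below)
+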
'Time_Taken_Train': time_taken + } + for layer_i in range(len(hps.perc_loss_layers)): + layer_name = hps.perc_loss_layers[layer_i] + train_summary_map['Train_raster_Cost_' + layer_name] = perc_relu_costs_raw[layer_i] + + create_summary(summary_writer, train_summary_map, train_step) + + output_format = ('step: %d, lr: %.6f, ' + 'snw: %.3f, ' + 'cost: %.4f, ' + 'ras: %.4f, stroke_num: %.4f, early_pen: %.4f, ' + 'pos_outside: %.4f, win_outside: %.4f, ' + 'train_time_taken: %.1f') + output_values = (step, curr_learning_rate, + curr_stroke_num_loss_weight, + train_cost, + raster_cost, stroke_num_cost, early_pen_states_cost, + pos_outside_cost, win_size_outside_cost, + time_taken) + output_log = output_format % output_values + # print(output_log) + tf.logging.info(output_log) + start = time.time() + + if should_save_log_img(step) and step > 0: + save_log_images(sess, eval_sample_model, valid_set, sub_log_img_root, step, curr_photo_prob, interpolate_type) + + if step % hps.save_every == 0 and step > 0: + save_model(sess, saver, sub_snapshot_root, step) + + +def trainer(model_params): + np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True) + + print('Hyperparams:') + for key, val in six.iteritems(model_params.values()): + print('%s = %s' % (key, str(val))) + print('Loading data files.') + print('-' * 100) + + datasets = load_dataset_training(FLAGS.dataset_dir, model_params) + + sub_snapshot_root = os.path.join(FLAGS.snapshot_root, model_params.program_name) + sub_log_root = os.path.join(FLAGS.log_root, model_params.program_name) + sub_log_img_root = os.path.join(FLAGS.log_img_root, model_params.program_name) + + train_set = datasets[0] + valid_set = datasets[1] + train_model_params = datasets[2] + eval_sample_model_params = datasets[3] + + eval_sample_model_params.loop_per_gpu = 1 + eval_sample_model_params.batch_size = len(eval_sample_model_params.gpus) * eval_sample_model_params.loop_per_gpu + + reset_graph() + train_model = sketch_image_model.VirtualSketchingModel(train_model_params) + eval_sample_model = sketch_image_model.VirtualSketchingModel(eval_sample_model_params, reuse=True) + + tfconfig = tf.ConfigProto(allow_soft_placement=True) + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + load_checkpoint(sess, FLAGS.neural_renderer_path, ras_only=True) + if train_model_params.raster_loss_base_type == 'perceptual': + load_checkpoint(sess, FLAGS.perceptual_model_root, perceptual_only=True) + + # Write config file to json file. 
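+    # model_config.json duplicates the hyperparameters printed above, so each
+    # snapshot directory is self-describing.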
+ os.makedirs(sub_log_root, exist_ok=True) + os.makedirs(sub_log_img_root, exist_ok=True) + os.makedirs(sub_snapshot_root, exist_ok=True) + with tf.gfile.Open(os.path.join(sub_snapshot_root, 'model_config.json'), 'w') as f: + json.dump(train_model_params.values(), f, indent=True) + + train(sess, train_model, eval_sample_model, train_set, valid_set, + sub_log_root, sub_snapshot_root, sub_log_img_root) + + +def main(dataset_type): + if dataset_type == 'rough': + model_params = get_default_hparams_rough() + elif dataset_type == 'face': + model_params = get_default_hparams_normal() + else: + raise Exception('Unknown dataset_type:', dataset_type) + + trainer(model_params) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', '-d', type=str, default='rough', choices=['rough', 'face'], help="The dataset type.") + args = parser.parse_args() + + main(args.data) diff --git a/robot_painting/qmupd_vs/train_vectorization.py b/robot_painting/qmupd_vs/train_vectorization.py new file mode 100644 index 0000000000000000000000000000000000000000..9242f813fcaf02f9968ec4f3e02a62ebc4bf6b95 --- /dev/null +++ b/robot_painting/qmupd_vs/train_vectorization.py @@ -0,0 +1,313 @@ +import json +import os +import time +import numpy as np +import six +import tensorflow as tf +from PIL import Image + +import model_common_train as sketch_vector_model +from hyper_parameters import FLAGS, get_default_hparams_clean +from utils import create_summary, save_model, reset_graph, load_checkpoint +from dataset_utils import load_dataset_training + +os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1' + +tf.logging.set_verbosity(tf.logging.INFO) + + +def should_save_log_img(step_): + if step_ % 500 == 0: + return True + else: + return False + + +def save_log_images(sess, model, data_set, save_root, step_num, save_num=10): + res_gap = (model.hps.image_size_large - model.hps.image_size_small) // (save_num - 1) + log_img_resolutions = [] + for ii in range(save_num - 1): + log_img_resolutions.append(model.hps.image_size_small + ii * res_gap) + log_img_resolutions.append(model.hps.image_size_large) + + for res_i in range(len(log_img_resolutions)): + resolution = log_img_resolutions[res_i] + + sub_save_root = os.path.join(save_root, 'res_' + str(resolution)) + os.makedirs(sub_save_root, exist_ok=True) + + input_photos, target_sketches, init_cursors, image_size_rand = \ + data_set.get_batch_from_memory(memory_idx=res_i, vary_thickness=model.hps.vary_thickness, + fixed_image_size=resolution, + random_cursor=model.hps.random_cursor, + init_cursor_on_undrawn_pixel=model.hps.init_cursor_on_undrawn_pixel) + # input_photos: (N, image_size, image_size), [0-stroke, 1-BG] + # target_sketches: (N, image_size, image_size), [0-stroke, 1-BG] + # init_cursors: (N, 1, 2), in size [0.0, 1.0) + + if input_photos is not None: + input_photo_val = np.expand_dims(input_photos, axis=-1) + else: + input_photo_val = np.expand_dims(target_sketches, axis=-1) + + init_cursor_input = [init_cursors for _ in range(model.total_loop)] + init_cursor_input = np.concatenate(init_cursor_input, axis=0) + image_size_input = [image_size_rand for _ in range(model.total_loop)] + image_size_input = np.stack(image_size_input, axis=0) + + feed = { + model.init_cursor: init_cursor_input, + model.image_size: image_size_input, + model.init_width: [model.hps.min_width], + } + for loop_i in range(model.total_loop): + feed[model.input_photo_list[loop_i]] = input_photo_val + + raster_images_pred, raster_images_pred_rgb = sess.run([model.pred_raster_imgs, 
model.pred_raster_imgs_rgb], + feed) # (N, image_size, image_size), [0.0-stroke, 1.0-BG] + raster_images_pred = (np.array(raster_images_pred[0]) * 255.0).astype(np.uint8) + input_sketch = (np.array(target_sketches[0]) * 255.0).astype(np.uint8) + raster_images_pred_rgb = (np.array(raster_images_pred_rgb[0]) * 255.0).astype(np.uint8) + + pred_save_path = os.path.join(sub_save_root, str(step_num) + '.png') + target_save_path = os.path.join(sub_save_root, 'gt.png') + + pred_rgb_save_root = os.path.join(sub_save_root, 'rgb') + os.makedirs(pred_rgb_save_root, exist_ok=True) + pred_rgb_save_path = os.path.join(pred_rgb_save_root, str(step_num) + '.png') + + raster_images_pred = Image.fromarray(raster_images_pred, 'L') + raster_images_pred.save(pred_save_path, 'PNG') + input_sketch = Image.fromarray(input_sketch, 'L') + input_sketch.save(target_save_path, 'PNG') + raster_images_pred_rgb = Image.fromarray(raster_images_pred_rgb, 'RGB') + raster_images_pred_rgb.save(pred_rgb_save_path, 'PNG') + + +def train(sess, train_model, eval_sample_model, train_set, val_set, sub_log_root, sub_snapshot_root, sub_log_img_root): + # Setup summary writer. + summary_writer = tf.summary.FileWriter(sub_log_root) + + print('-' * 100) + + # Calculate trainable params. + t_vars = tf.trainable_variables() + count_t_vars = 0 + for var in t_vars: + num_param = np.prod(var.get_shape().as_list()) + count_t_vars += num_param + print('%s | shape: %s | num_param: %i' % (var.name, str(var.get_shape()), num_param)) + print('Total trainable variables %i.' % count_t_vars) + print('-' * 100) + + # main train loop + + hps = train_model.hps + start = time.time() + + # create saver + snapshot_save_vars = [var for var in tf.global_variables() + if 'raster_unit' not in var.op.name and 'VGG16' not in var.op.name] + saver = tf.train.Saver(var_list=snapshot_save_vars, max_to_keep=20) + + start_step = 1 + print('start_step', start_step) + + mean_perc_relu_losses = [0.0 for _ in range(len(hps.perc_loss_layers))] + + for _ in range(start_step, hps.num_steps + 1): + step = sess.run(train_model.global_step) # start from 0 + + count_step = min(step, hps.num_steps) + curr_learning_rate = ((hps.learning_rate - hps.min_learning_rate) * + (1 - count_step / hps.num_steps) ** hps.decay_power + hps.min_learning_rate) + + if hps.sn_loss_type == 'decreasing': + assert hps.decrease_stop_steps <= hps.num_steps + assert hps.stroke_num_loss_weight_end <= hps.stroke_num_loss_weight + curr_sn_k = (hps.stroke_num_loss_weight - hps.stroke_num_loss_weight_end) / float(hps.decrease_stop_steps) + curr_stroke_num_loss_weight = hps.stroke_num_loss_weight - count_step * curr_sn_k + curr_stroke_num_loss_weight = max(curr_stroke_num_loss_weight, hps.stroke_num_loss_weight_end) + elif hps.sn_loss_type == 'fixed': + curr_stroke_num_loss_weight = hps.stroke_num_loss_weight + elif hps.sn_loss_type == 'increasing': + curr_sn_k = hps.stroke_num_loss_weight / float(hps.num_steps - hps.increase_start_steps) + curr_stroke_num_loss_weight = max(count_step - hps.increase_start_steps, 0) * curr_sn_k + else: + raise Exception('Unknown sn_loss_type', hps.sn_loss_type) + + if hps.early_pen_loss_type == 'head': + curr_early_pen_k = (hps.max_seq_len - hps.early_pen_length) / float(hps.num_steps) + curr_early_pen_loss_len = count_step * curr_early_pen_k + hps.early_pen_length + + curr_early_pen_loss_start = 1 + curr_early_pen_loss_end = curr_early_pen_loss_len + elif hps.early_pen_loss_type == 'tail': + curr_early_pen_k = (hps.max_seq_len // 2 - 1) / float(hps.num_steps) + 
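# 'tail' keeps the window end pinned at max_seq_len and grows the penalised
+            # suffix backwards as training progresses.
+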
curr_early_pen_loss_len = count_step * curr_early_pen_k + hps.max_seq_len // 2 + + curr_early_pen_loss_end = hps.max_seq_len + curr_early_pen_loss_start = curr_early_pen_loss_end - curr_early_pen_loss_len + elif hps.early_pen_loss_type == 'move': + curr_early_pen_k = (hps.max_seq_len // 2 - 1) / float(hps.num_steps) + curr_early_pen_loss_len = count_step * curr_early_pen_k + hps.max_seq_len // 2 + + curr_early_pen_loss_start = hps.max_seq_len - curr_early_pen_loss_len + curr_early_pen_loss_end = curr_early_pen_loss_start + hps.max_seq_len // 2 + else: + raise Exception('Unknown early_pen_loss_type', hps.early_pen_loss_type) + curr_early_pen_loss_start = int(round(curr_early_pen_loss_start)) + curr_early_pen_loss_end = int(round(curr_early_pen_loss_end)) + + input_photos, target_sketches, init_cursors, image_sizes = \ + train_set.get_batch_multi_res(loop_num=train_model.total_loop, vary_thickness=hps.vary_thickness, + random_cursor=hps.random_cursor, + init_cursor_on_undrawn_pixel=hps.init_cursor_on_undrawn_pixel) + # input_photos: list of (N, image_size, image_size), [0-stroke, 1-BG] + # target_sketches: list of (N, image_size, image_size), [0-stroke, 1-BG] + # init_cursors: list of (N, 1, 2), in size [0.0, 1.0) + + init_cursors_input = np.concatenate(init_cursors, axis=0) + image_size_input = np.stack(image_sizes, axis=0) + + feed = { + train_model.init_cursor: init_cursors_input, + train_model.image_size: image_size_input, + train_model.init_width: [hps.min_width], + + train_model.lr: curr_learning_rate, + train_model.stroke_num_loss_weight: curr_stroke_num_loss_weight, + train_model.early_pen_loss_start_idx: curr_early_pen_loss_start, + train_model.early_pen_loss_end_idx: curr_early_pen_loss_end, + + train_model.last_step_num: float(step), + } + for layer_i in range(len(hps.perc_loss_layers)): + feed[train_model.perc_loss_mean_list[layer_i]] = mean_perc_relu_losses[layer_i] + + for loop_i in range(train_model.total_loop): + if input_photos is not None: + input_photo_val = np.expand_dims(input_photos[loop_i], axis=-1) + else: + input_photo_val = np.expand_dims(target_sketches[loop_i], axis=-1) + feed[train_model.input_photo_list[loop_i]] = input_photo_val + + (train_cost, raster_cost, perc_relu_costs_raw, perc_relu_costs_norm, + stroke_num_cost, early_pen_states_cost, + pos_outside_cost, win_size_outside_cost, + train_step) = sess.run([ + train_model.cost, train_model.raster_cost, + train_model.perc_relu_losses_raw, train_model.perc_relu_losses_norm, + train_model.stroke_num_cost, + train_model.early_pen_states_cost, + train_model.pos_outside_cost, train_model.win_size_outside_cost, + train_model.global_step + ], feed) + + ## update mean_raster_loss + for layer_i in range(len(hps.perc_loss_layers)): + perc_relu_cost_raw = perc_relu_costs_raw[layer_i] + mean_perc_relu_loss = mean_perc_relu_losses[layer_i] + mean_perc_relu_loss = (mean_perc_relu_loss * step + perc_relu_cost_raw) / float(step + 1) + mean_perc_relu_losses[layer_i] = mean_perc_relu_loss + + _ = sess.run(train_model.train_op, feed) + + if step % 20 == 0 and step > 0: + end = time.time() + time_taken = end - start + + train_summary_map = { + 'Train_Cost': train_cost, + 'Train_raster_Cost': raster_cost, + 'Train_stroke_num_Cost': stroke_num_cost, + 'Train_early_pen_states_cost': early_pen_states_cost, + 'Train_pos_outside_Cost': pos_outside_cost, + 'Train_win_size_outside_Cost': win_size_outside_cost, + 'Learning_Rate': curr_learning_rate, + 'Time_Taken_Train': time_taken + } + for layer_i in range(len(hps.perc_loss_layers)): + 
layer_name = hps.perc_loss_layers[layer_i] + train_summary_map['Train_raster_Cost_' + layer_name] = perc_relu_costs_raw[layer_i] + + create_summary(summary_writer, train_summary_map, train_step) + + output_format = ('step: %d, lr: %.6f, ' + 'snw: %.3f, ' + 'cost: %.4f, ' + 'ras: %.4f, stroke_num: %.4f, early_pen: %.4f, ' + 'pos_outside: %.4f, win_outside: %.4f, ' + 'train_time_taken: %.1f') + output_values = (step, curr_learning_rate, + curr_stroke_num_loss_weight, + train_cost, + raster_cost, stroke_num_cost, early_pen_states_cost, + pos_outside_cost, win_size_outside_cost, + time_taken) + output_log = output_format % output_values + # print(output_log) + tf.logging.info(output_log) + start = time.time() + + if should_save_log_img(step) and step > 0: + save_log_images(sess, eval_sample_model, val_set, sub_log_img_root, step) + + if step % hps.save_every == 0 and step > 0: + save_model(sess, saver, sub_snapshot_root, step) + + +def trainer(model_params): + np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True) + + print('Hyperparams:') + for key, val in six.iteritems(model_params.values()): + print('%s = %s' % (key, str(val))) + print('Loading data files.') + print('-' * 100) + + datasets = load_dataset_training(FLAGS.dataset_dir, model_params) + + sub_snapshot_root = os.path.join(FLAGS.snapshot_root, model_params.program_name) + sub_log_root = os.path.join(FLAGS.log_root, model_params.program_name) + sub_log_img_root = os.path.join(FLAGS.log_img_root, model_params.program_name) + + train_set = datasets[0] + val_set = datasets[1] + train_model_params = datasets[2] + eval_sample_model_params = datasets[3] + + eval_sample_model_params.loop_per_gpu = 1 + eval_sample_model_params.batch_size = len(eval_sample_model_params.gpus) * eval_sample_model_params.loop_per_gpu + + reset_graph() + train_model = sketch_vector_model.VirtualSketchingModel(train_model_params) + eval_sample_model = sketch_vector_model.VirtualSketchingModel(eval_sample_model_params, reuse=True) + + tfconfig = tf.ConfigProto(allow_soft_placement=True) + tfconfig.gpu_options.allow_growth = True + sess = tf.InteractiveSession(config=tfconfig) + sess.run(tf.global_variables_initializer()) + + load_checkpoint(sess, FLAGS.neural_renderer_path, ras_only=True) + if train_model_params.raster_loss_base_type == 'perceptual': + load_checkpoint(sess, FLAGS.perceptual_model_root, perceptual_only=True) + + # Write config file to json file. 
+ os.makedirs(sub_log_root, exist_ok=True) + os.makedirs(sub_log_img_root, exist_ok=True) + os.makedirs(sub_snapshot_root, exist_ok=True) + with tf.gfile.Open(os.path.join(sub_snapshot_root, 'model_config.json'), 'w') as f: + json.dump(train_model_params.values(), f, indent=True) + + train(sess, train_model, eval_sample_model, train_set, val_set, + sub_log_root, sub_snapshot_root, sub_log_img_root) + + +def main(): + model_params = get_default_hparams_clean() + trainer(model_params) + + +if __name__ == '__main__': + main() diff --git a/robot_painting/qmupd_vs/util/__init__.py b/robot_painting/qmupd_vs/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae36f63d8859ec0c60dcbfe67c4ac324e751ddf7 --- /dev/null +++ b/robot_painting/qmupd_vs/util/__init__.py @@ -0,0 +1 @@ +"""This package includes a miscellaneous collection of useful helper functions.""" diff --git a/robot_painting/qmupd_vs/util/get_data.py b/robot_painting/qmupd_vs/util/get_data.py new file mode 100644 index 0000000000000000000000000000000000000000..97edc3ce3c3ab6d6080dca34e73a5fb77bb715fb --- /dev/null +++ b/robot_painting/qmupd_vs/util/get_data.py @@ -0,0 +1,110 @@ +from __future__ import print_function +import os +import tarfile +import requests +from warnings import warn +from zipfile import ZipFile +from bs4 import BeautifulSoup +from os.path import abspath, isdir, join, basename + + +class GetData(object): + """A Python script for downloading CycleGAN or pix2pix datasets. + + Parameters: + technique (str) -- One of: 'cyclegan' or 'pix2pix'. + verbose (bool) -- If True, print additional information. + + Examples: + >>> from util.get_data import GetData + >>> gd = GetData(technique='cyclegan') + >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed. + + Alternatively, You can use bash scripts: 'scripts/download_pix2pix_model.sh' + and 'scripts/download_cyclegan_model.sh'. + """ + + def __init__(self, technique='cyclegan', verbose=True): + url_dict = { + 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/', + 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets' + } + self.url = url_dict.get(technique.lower()) + self._verbose = verbose + + def _print(self, text): + if self._verbose: + print(text) + + @staticmethod + def _get_options(r): + soup = BeautifulSoup(r.text, 'lxml') + options = [h.text for h in soup.find_all('a', href=True) + if h.text.endswith(('.zip', 'tar.gz'))] + return options + + def _present_options(self): + r = requests.get(self.url) + options = self._get_options(r) + print('Options:\n') + for i, o in enumerate(options): + print("{0}: {1}".format(i, o)) + choice = input("\nPlease enter the number of the " + "dataset above you wish to download:") + return options[int(choice)] + + def _download_data(self, dataset_url, save_path): + if not isdir(save_path): + os.makedirs(save_path) + + base = basename(dataset_url) + temp_save_path = join(save_path, base) + + with open(temp_save_path, "wb") as f: + r = requests.get(dataset_url) + f.write(r.content) + + if base.endswith('.tar.gz'): + obj = tarfile.open(temp_save_path) + elif base.endswith('.zip'): + obj = ZipFile(temp_save_path, 'r') + else: + raise ValueError("Unknown File Type: {0}.".format(base)) + + self._print("Unpacking Data...") + obj.extractall(save_path) + obj.close() + os.remove(temp_save_path) + + def get(self, save_path, dataset=None): + """ + + Download a dataset. + + Parameters: + save_path (str) -- A directory to save the data to. 
+            dataset (str)   -- (optional). A specific dataset to download.
+                            Note: this must include the file extension.
+                            If None, options will be presented for you
+                            to choose from.
+
+        Returns:
+            save_path_full (str) -- the absolute path to the downloaded data.
+
+        """
+        if dataset is None:
+            selected_dataset = self._present_options()
+        else:
+            selected_dataset = dataset
+
+        save_path_full = join(save_path, selected_dataset.split('.')[0])
+
+        if isdir(save_path_full):
+            warn("\n'{0}' already exists. Voiding Download.".format(
+                save_path_full))
+        else:
+            self._print('Downloading Data...')
+            url = "{0}/{1}".format(self.url, selected_dataset)
+            self._download_data(url, save_path=save_path)
+
+        return abspath(save_path_full)
diff --git a/robot_painting/qmupd_vs/util/html.py b/robot_painting/qmupd_vs/util/html.py
new file mode 100644
index 0000000000000000000000000000000000000000..55b03eb181f5c4d48bf1d39d07e0cf92834d034a
--- /dev/null
+++ b/robot_painting/qmupd_vs/util/html.py
@@ -0,0 +1,91 @@
+import dominate
+from dominate.tags import meta, h3, table, tr, td, p, a, img, br
+import os
+
+
+class HTML:
+    """This HTML class allows us to save images and write texts into a single HTML file.
+
+    It consists of functions such as add_header (add a text header to the HTML file),
+    add_images (add a row of images to the HTML file), and save (save the HTML to the disk).
+    It is based on 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
+    """
+
+    def __init__(self, web_dir, title, refresh=0, folder='images'):
+        """Initialize the HTML classes
+
+        Parameters:
+            web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/<folder>/
+            title (str)   -- the webpage name
+            refresh (int) -- how often the website refreshes itself; if 0, no refreshing
+            folder (str)  -- the subdirectory of <web_dir> that stores the images
+        """
+        self.title = title
+        self.web_dir = web_dir
+        self.folder = folder
+        self.img_dir = os.path.join(self.web_dir, folder)
+        if not os.path.exists(self.web_dir):
+            os.makedirs(self.web_dir)
+        if not os.path.exists(self.img_dir):
+            os.makedirs(self.img_dir)
+
+        self.doc = dominate.document(title=title)
+        if refresh > 0:
+            with self.doc.head:
+                meta(http_equiv="refresh", content=str(refresh))
+
+    def get_image_dir(self):
+        """Return the directory that stores images"""
+        return self.img_dir
+
+    def add_header(self, text):
+        """Insert a header to the HTML file
+
+        Parameters:
+            text (str) -- the header text
+        """
+        with self.doc:
+            h3(text)
+
+    def add_images(self, ims, txts, links, width=400):
+        """add images to the HTML file
+
+        Parameters:
+            ims (str list)   -- a list of image paths
+            txts (str list)  -- a list of image names shown on the website
+            links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
+        """
+        self.t = table(border=1, style="table-layout: fixed;")  # Insert a table
+        self.doc.add(self.t)
+        with self.t:
+            with tr():
+                for im, txt, link in zip(ims, txts, links):
+                    with td(style="word-wrap: break-word;", halign="center", valign="top"):
+                        with p():
+                            with a(href=os.path.join('images', link)):
+                                #img(style="width:%dpx" % width, src=os.path.join('images', im))
+                                img(style="width:%dpx" % width, src=os.path.join(self.folder, im))
+                            br()
+                            p(txt)
+
+    def save(self):
+        """save the current content to the HTML file"""
+        #html_file = '%s/index.html' % self.web_dir
+        name = self.folder[6:] if self.folder[:6] == 'images' else self.folder
+        html_file = '%s/index%s.html' % (self.web_dir, name)
+        f = open(html_file, 'wt')
+        f.write(self.doc.render())
+        f.close()
+
+
+if __name__ == '__main__':  # we show an example usage here.
+    html = HTML('web/', 'test_html')
+    html.add_header('hello world')
+
+    ims, txts, links = [], [], []
+    for n in range(4):
+        ims.append('image_%d.png' % n)
+        txts.append('text_%d' % n)
+        links.append('image_%d.png' % n)
+    html.add_images(ims, txts, links)
+    html.save()
diff --git a/robot_painting/qmupd_vs/util/image_pool.py b/robot_painting/qmupd_vs/util/image_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d086f882bc3d1b90c529fce6cddaaa75f2005d7
--- /dev/null
+++ b/robot_painting/qmupd_vs/util/image_pool.py
@@ -0,0 +1,54 @@
+import random
+import torch
+
+
+class ImagePool():
+    """This class implements an image buffer that stores previously generated images.
+
+    This buffer enables us to update discriminators using a history of generated images
+    rather than the ones produced by the latest generators.
+    """
+
+    def __init__(self, pool_size):
+        """Initialize the ImagePool class
+
+        Parameters:
+            pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
+        """
+        self.pool_size = pool_size
+        if self.pool_size > 0:  # create an empty pool
+            self.num_imgs = 0
+            self.images = []
+
+    def query(self, images):
+        """Return an image from the pool.
+
+        Parameters:
+            images: the latest generated images from the generator
+
+        Returns images from the buffer.
+
+        With 50% probability, the buffer returns the input images.
+        With 50% probability, the buffer returns images previously stored in it,
+        and inserts the current images into the buffer.
+        """
+        if self.pool_size == 0:  # if the buffer size is 0, do nothing
+            return images
+        return_images = []
+        for image in images:
+            image = torch.unsqueeze(image.data, 0)
+            if self.num_imgs < self.pool_size:  # if the buffer is not full; keep inserting current images to the buffer
+                self.num_imgs = self.num_imgs + 1
+                self.images.append(image)
+                return_images.append(image)
+            else:
+                p = random.uniform(0, 1)
+                if p > 0.5:  # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
+                    random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
+                    tmp = self.images[random_id].clone()
+                    self.images[random_id] = image
+                    return_images.append(tmp)
+                else:  # by another 50% chance, the buffer will return the current image
+                    return_images.append(image)
+        return_images = torch.cat(return_images, 0)  # collect all the images and return
+        return return_images
diff --git a/robot_painting/qmupd_vs/util/util.py b/robot_painting/qmupd_vs/util/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..3eb88e42810d605fd0dab3681941567b1deadd7a
--- /dev/null
+++ b/robot_painting/qmupd_vs/util/util.py
@@ -0,0 +1,134 @@
+"""This module contains simple helper functions """
+from __future__ import print_function
+import torch
+import numpy as np
+from PIL import Image
+import os
+import pdb
+from scipy.io import savemat
+
+
+def tensor2im(input_image, imtype=np.uint8):
+    """Converts a Tensor array into a numpy image array.
+
+    Parameters:
+        input_image (tensor) -- the input image tensor array
+        imtype (type)        -- the desired type of the converted numpy array
+    """
+    if not isinstance(input_image, np.ndarray):
+        if isinstance(input_image, torch.Tensor):  # get the data from a variable
+            image_tensor = input_image.data
+        else:
+            return input_image
+        #pdb.set_trace()
+        image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array
+        if image_numpy.shape[0] == 1:  # grayscale to RGB
+            image_numpy = np.tile(image_numpy, (3, 1, 1))
+        elif image_numpy.shape[0] == 2:
+            image_numpy = np.concatenate([image_numpy, image_numpy[1:2,:,:]], 0)
+        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
+    else:  # if it is a numpy array, do nothing
+        image_numpy = input_image
+    return image_numpy.astype(imtype)
+    #return np.round(image_numpy).astype(imtype),image_numpy
+
+
+def tensor2im2(input_image, imtype=np.uint8):
+    """Converts a Tensor array into a numpy image array, undoing ImageNet-style normalization.
+
+    Parameters:
+        input_image (tensor) -- the input image tensor array
+        imtype (type)        -- the desired type of the converted numpy array
+    """
+    if not isinstance(input_image, np.ndarray):
+        if isinstance(input_image, torch.Tensor):  # get the data from a variable
+            image_tensor = input_image.data
+        else:
+            return input_image
+        #pdb.set_trace()
+        image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array
+        if image_numpy.shape[0] == 1:  # grayscale to RGB
+            image_numpy = np.tile(image_numpy, (3, 1, 1))
+        elif image_numpy.shape[0] == 2:
+            image_numpy = np.concatenate([image_numpy, image_numpy[1:2,:,:]], 0)
+        image_numpy = np.transpose(image_numpy, (1, 2, 0))
+        image_numpy[:,:,0] = image_numpy[:,:,0] * 0.229 + 0.485
+        image_numpy[:,:,1] = image_numpy[:,:,1] * 0.224 + 0.456
+        image_numpy[:,:,2] = image_numpy[:,:,2] * 0.225 + 0.406
+        image_numpy = image_numpy * 255.0  # post-processing: transpose and scaling
+    else:  # if it is a numpy array, do nothing
+        image_numpy = input_image
+    return image_numpy.astype(imtype)
+
+
+def diagnose_network(net, name='network'):
+    """Calculate and print the mean of average absolute(gradients)
+
+    Parameters:
+        net (torch network) -- Torch network
+        name (str)          -- the name of the network
+    """
+    mean = 0.0
+    count = 0
+    for param in net.parameters():
+        if param.grad is not None:
+            mean += torch.mean(torch.abs(param.grad.data))
+            count += 1
+    if count > 0:
+        mean = mean / count
+    print(name)
+    print(mean)
+
+
+def save_image(image_numpy, image_path):
+    """Save a numpy image to the disk
+
+    Parameters:
+        image_numpy (numpy array) -- input numpy array
+        image_path (str)          -- the path of the image
+    """
+    image_pil = Image.fromarray(image_numpy)
+    #pdb.set_trace()
+    image_pil.save(image_path)
+
+
+def print_numpy(x, val=True, shp=False):
+    """Print the mean, min, max, median, std, and size of a numpy array
+
+    Parameters:
+        val (bool) -- if print the values of the numpy array
+        shp (bool) -- if print the shape of the numpy array
+    """
+    x = x.astype(np.float64)
+    if shp:
+        print('shape,', x.shape)
+    if val:
+        x = x.flatten()
+        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
+            np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
+
+
+def mkdirs(paths):
+    """create empty directories if they don't exist
+
+    Parameters:
+        paths (str list) -- a list of directory paths
+    """
+    if isinstance(paths, list) and not isinstance(paths, str):
+        for path in paths:
+            mkdir(path)
+    else:
+        mkdir(paths)
+
+
+def mkdir(path):
+
"""create a single empty directory if it didn't exist + + Parameters: + path (str) -- a single directory path + """ + if not os.path.exists(path): + os.makedirs(path) + +def normalize_tensor(in_feat,eps=1e-10): + norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True)) + return in_feat/(norm_factor+eps) \ No newline at end of file diff --git a/robot_painting/qmupd_vs/util/visualizer.py b/robot_painting/qmupd_vs/util/visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..a05c141b5c361e80ecf824ad04ec1d55e3f7541a --- /dev/null +++ b/robot_painting/qmupd_vs/util/visualizer.py @@ -0,0 +1,263 @@ +import numpy as np +import os +import sys +import ntpath +import time +from . import util, html +from subprocess import Popen, PIPE +#from scipy.misc import imresize +from PIL import Image +import pdb +#from scipy.io import savemat + +if sys.version_info[0] == 2: + VisdomExceptionBase = Exception +else: + VisdomExceptionBase = ConnectionError + + +def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256): + """Save images to the disk. + + Parameters: + webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details) + visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs + image_path (str) -- the string is used to create image paths + aspect_ratio (float) -- the aspect ratio of saved images + width (int) -- the images will be resized to width x width + + This function will save images stored in 'visuals' to the HTML file specified by 'webpage'. + """ + image_dir = webpage.get_image_dir() + short_path = ntpath.basename(image_path[0]) + name = os.path.splitext(short_path)[0] + + webpage.add_header(name) + ims, txts, links = [], [], [] + + for label, im_data in visuals.items(): + ## tensor to im + im = util.tensor2im(im_data) + #im = util.tensor2im2(im_data) + ## save mat + #im,imo = util.tensor2im(im_data) + #matname = os.path.join(image_dir, '%s_%s.mat' % (name, label)) + #savemat(matname,{'imo':imo}) + image_name = '%s_%s.png' % (name, label) + save_path = os.path.join(image_dir, image_name) + h, w, _ = im.shape + if aspect_ratio > 1.0: + #im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic') + im = np.array(Image.fromarray(im).resize((int(w * aspect_ratio), h), Image.BICUBIC)) + if aspect_ratio < 1.0: + #im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic') + im = np.array(Image.fromarray(im).resize((w, int(h / aspect_ratio)), Image.BICUBIC)) + util.save_image(im, save_path) + + ims.append(image_name) + txts.append(label) + links.append(image_name) + webpage.add_images(ims, txts, links, width=width) + + +class Visualizer(): + """This class includes several functions that can display/save images and print/save logging information. + + It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images. 
+ """ + + def __init__(self, opt): + """Initialize the Visualizer class + + Parameters: + opt -- stores all the experiment flags; needs to be a subclass of BaseOptions + Step 1: Cache the training/test options + Step 2: connect to a visdom server + Step 3: create an HTML object for saveing HTML filters + Step 4: create a logging file to store training losses + """ + self.opt = opt # cache the option + self.display_id = opt.display_id + self.use_html = opt.isTrain and not opt.no_html + self.win_size = opt.display_winsize + self.name = opt.name + self.port = opt.display_port + self.saved = False + if self.display_id > 0: # connect to a visdom server given and + import visdom + self.ncols = opt.display_ncols + self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env) + if not self.vis.check_connection(): + self.create_visdom_connections() + + if self.use_html: # create an HTML object at /web/; images will be saved under /web/images/ + self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') + self.img_dir = os.path.join(self.web_dir, 'images') + print('create web directory %s...' % self.web_dir) + util.mkdirs([self.web_dir, self.img_dir]) + # create a logging file to store training losses + self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') + with open(self.log_name, "a") as log_file: + now = time.strftime("%c") + log_file.write('================ Training Loss (%s) ================\n' % now) + + def reset(self): + """Reset the self.saved status""" + self.saved = False + + def create_visdom_connections(self): + """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """ + cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port + print('\n\nCould not connect to Visdom server. \n Trying to start a server....') + print('Command: %s' % cmd) + Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) + + def display_current_results(self, visuals, epoch, save_result): + """Display current results on visdom; save current results to an HTML file. + + Parameters: + visuals (OrderedDict) - - dictionary of images to display or save + epoch (int) - - the current epoch + save_result (bool) - - if save the current results to an HTML file + """ + if self.display_id > 0: # show images in the browser using visdom + ncols = self.ncols + if ncols > 0: # show all the images in one visdom panel + ncols = min(ncols, len(visuals)) + h, w = next(iter(visuals.values())).shape[:2] + table_css = """""" % (w, h) # create a table css + # create a table of images. + title = self.name + label_html = '' + label_html_row = '' + images = [] + idx = 0 + for label, image in visuals.items(): + #image_numpy = util.tensor2im(image) + image_numpy = util.tensor2im2(image) + label_html_row += '%s' % label + #pdb.set_trace() + images.append(image_numpy.transpose([2, 0, 1])) + idx += 1 + if idx % ncols == 0: + label_html += '%s' % label_html_row + label_html_row = '' + white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255 + while idx % ncols != 0: + images.append(white_image) + label_html_row += '' + idx += 1 + if label_html_row != '': + label_html += '%s' % label_html_row + try: + self.vis.images(images, nrow=ncols, win=self.display_id + 1, + padding=2, opts=dict(title=title + ' images')) + label_html = '%s
' % label_html + self.vis.text(table_css + label_html, win=self.display_id + 2, + opts=dict(title=title + ' labels')) + except VisdomExceptionBase: + self.create_visdom_connections() + + else: # show each image in a separate visdom panel; + idx = 1 + try: + for label, image in visuals.items(): + image_numpy = util.tensor2im(image) + self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label), + win=self.display_id + idx) + idx += 1 + except VisdomExceptionBase: + self.create_visdom_connections() + + if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved. + self.saved = True + # save images to the disk + for label, image in visuals.items(): + image_numpy = util.tensor2im(image) + img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) + util.save_image(image_numpy, img_path) + + # update website + webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1) + for n in range(epoch, 0, -1): + webpage.add_header('epoch [%d]' % n) + ims, txts, links = [], [], [] + + for label, image_numpy in visuals.items(): + image_numpy = util.tensor2im(image) + img_path = 'epoch%.3d_%s.png' % (n, label) + ims.append(img_path) + txts.append(label) + links.append(img_path) + webpage.add_images(ims, txts, links, width=self.win_size) + webpage.save() + + def plot_current_losses(self, epoch, counter_ratio, losses): + """display the current losses on visdom display: dictionary of error labels and values + + Parameters: + epoch (int) -- current epoch + counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1 + losses (OrderedDict) -- training losses stored in the format of (name, float) pairs + """ + if not hasattr(self, 'plot_data'): + self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())} + self.plot_data['X'].append(epoch + counter_ratio) + self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']]) + #X = np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1) + #Y = np.array(self.plot_data['Y']) + #pdb.set_trace() + try: + self.vis.line( + X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1), + Y=np.array(self.plot_data['Y']), + opts={ + 'title': self.name + ' loss over time', + 'legend': self.plot_data['legend'], + 'xlabel': 'epoch', + 'ylabel': 'loss'}, + win=self.display_id) + except VisdomExceptionBase: + self.create_visdom_connections() + + # losses: same format as |losses| of plot_current_losses + def print_current_losses(self, epoch, iters, losses, t_comp, t_data): + """print current losses on console; also save the losses to the disk + + Parameters: + epoch (int) -- current epoch + iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) + losses (OrderedDict) -- training losses stored in the format of (name, float) pairs + t_comp (float) -- computational time per data point (normalized by batch_size) + t_data (float) -- data loading time per data point (normalized by batch_size) + """ + message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data) + for k, v in losses.items(): + message += '%s: %.3f ' % (k, v) + + print(message) # print the message + with open(self.log_name, "a") as log_file: + log_file.write('%s\n' % message) # save the message + + # losses: same format as |losses| of plot_current_losses + def print_current_losses_process(self, epoch, iters, losses, t_comp, t_data, processes): + """print current losses on 
+
+        Parameters:
+            epoch (int) -- current epoch
+            iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
+            losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
+            t_comp (float) -- computational time per data point (normalized by batch_size)
+            t_data (float) -- data loading time per data point (normalized by batch_size)
+        """
+        message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
+        message += '[process: %.3f, non_trunc: %.3f, trunc: %.3f] ' % (processes[0], processes[1], processes[2])
+        for k, v in losses.items():
+            message += '%s: %.3f ' % (k, v)
+
+        print(message)  # print the message
+        with open(self.log_name, "a") as log_file:
+            log_file.write('%s\n' % message)  # save the message
diff --git a/robot_painting/qmupd_vs/utils.py b/robot_painting/qmupd_vs/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea7a418df19d0383f99e00f99c41ece270deaa73
--- /dev/null
+++ b/robot_painting/qmupd_vs/utils.py
@@ -0,0 +1,397 @@
+import os
+import cv2
+import json
+import numpy as np
+import tensorflow as tf
+from PIL import Image
+import matplotlib.pyplot as plt
+
+
+#############################################
+# Tensorflow utils
+#############################################
+
+def reset_graph():
+    """Closes the current default session and resets the graph."""
+    sess = tf.compat.v1.get_default_session()
+    if sess:
+        sess.close()
+    tf.compat.v1.reset_default_graph()
+
+
+def load_checkpoint(sess, checkpoint_path, ras_only=False, perceptual_only=False, gen_model_pretrain=False,
+                    train_entire=False):
+    if ras_only:
+        load_var = {var.op.name: var for var in tf.global_variables() if 'raster_unit' in var.op.name}
+    elif perceptual_only:
+        load_var = {var.op.name: var for var in tf.global_variables() if 'VGG16' in var.op.name}
+    elif train_entire:
+        load_var = {var.op.name: var for var in tf.global_variables()
+                    if 'discriminator' not in var.op.name
+                    and 'raster_unit' not in var.op.name
+                    and 'VGG16' not in var.op.name
+                    and 'beta1' not in var.op.name
+                    and 'beta2' not in var.op.name
+                    and 'global_step' not in var.op.name
+                    and 'Entire' not in var.op.name
+                    }
+    else:
+        if gen_model_pretrain:
+            load_var = {var.op.name: var for var in tf.global_variables()
+                        if 'discriminator' not in var.op.name
+                        and 'raster_unit' not in var.op.name
+                        and 'VGG16' not in var.op.name
+                        and 'beta1' not in var.op.name
+                        and 'beta2' not in var.op.name
+                        # and 'global_step' not in var.op.name
+                        }
+        else:
+            load_var = tf.global_variables()
+
+    restorer = tf.train.Saver(load_var)
+    if not ras_only:
+        ckpt = tf.train.get_checkpoint_state(checkpoint_path)
+        model_checkpoint_path = ckpt.model_checkpoint_path
+    else:
+        model_checkpoint_path = checkpoint_path
+    print('Loading model %s' % model_checkpoint_path)
+    restorer.restore(sess, model_checkpoint_path)
+
+    snapshot_step = model_checkpoint_path[model_checkpoint_path.rfind('-') + 1:]
+    return snapshot_step
+
+
+def create_summary(summary_writer, summ_map, step):
+    for summ_key in summ_map:
+        summ_value = summ_map[summ_key]
+        summ = tf.summary.Summary()
+        summ.value.add(tag=summ_key, simple_value=float(summ_value))
+        summary_writer.add_summary(summ, step)
+    summary_writer.flush()
+
+
+def save_model(sess, saver, model_save_path, global_step):
+    checkpoint_path = os.path.join(model_save_path, 'p2s')
+    print('saving model %s.' % checkpoint_path)
+    print('global_step %i.' % global_step)
+    saver.save(sess, checkpoint_path, global_step=global_step)
+
+
+#############################################
+# Utils for basic image processing
+#############################################
+
+
+def normal(x, width):
+    return (int)(x * (width - 1) + 0.5)
+
+
+def draw(f, width=128):
+    contour = []
+    x0, y0, x1, y1, x2, y2, z0, z2, w0, w2 = f
+    x1 = x0 + (x2 - x0) * x1
+    y1 = y0 + (y2 - y0) * y1
+    x0 = normal(x0, width * 2)
+    x1 = normal(x1, width * 2)
+    x2 = normal(x2, width * 2)
+    y0 = normal(y0, width * 2)
+    y1 = normal(y1, width * 2)
+    y2 = normal(y2, width * 2)
+    z0 = (int)(1 + z0 * width // 2)
+    z2 = (int)(1 + z2 * width // 2)
+    canvas = np.zeros([width * 2, width * 2]).astype('float32')
+    tmp = 1. / 100
+    for i in range(100):
+        # cv2.imshow('canvas', canvas)
+        # cv2.waitKey(30)
+        t = i * tmp
+        x = (int)((1-t) * (1-t) * x0 + 2 * t * (1-t) * x1 + t * t * x2)
+        y = (int)((1-t) * (1-t) * y0 + 2 * t * (1-t) * y1 + t * t * y2)
+        z = (int)((1-t) * z0 + t * z2)
+        w = (1-t) * w0 + t * w2
+        # cv2.circle(canvas, (y, x), z, w, -1)
+        # the line is drawn with a fixed radius instead of the interpolated width z
+        cv2.circle(canvas, (y, x), 1, w, 2)
+        contour.append(np.array([[y, x]]))
+        # contour.append([[cursor_pos[0] * image_size, cursor_pos[1] * image_size]])
+    return 1 - cv2.resize(canvas, dsize=(width, width)), np.array(contour)
+
+
+def rgb_trans(split_num, break_values):
+    slice_per_split = split_num // 8
+    break_values_head, break_values_tail = break_values[:-1], break_values[1:]
+
+    results = []
+
+    for split_i in range(8):
+        break_value_head = break_values_head[split_i]
+        break_value_tail = break_values_tail[split_i]
+
+        slice_gap = float(break_value_tail - break_value_head) / float(slice_per_split)
+        for slice_i in range(slice_per_split):
+            slice_val = break_value_head + slice_gap * slice_i
+            slice_val = int(round(slice_val))
+            results.append(slice_val)
+
+    return results
+
+
+def get_colors(color_num):
+    split_num = (color_num // 8 + 1) * 8
+
+    r_break_values = [0, 0, 0, 0, 128, 255, 255, 255, 128]
+    g_break_values = [0, 0, 128, 255, 255, 255, 128, 0, 0]
+    b_break_values = [128, 255, 255, 255, 128, 0, 0, 0, 0]
+
+    r_rst_list = rgb_trans(split_num, r_break_values)
+    g_rst_list = rgb_trans(split_num, g_break_values)
+    b_rst_list = rgb_trans(split_num, b_break_values)
+
+    assert len(r_rst_list) == len(g_rst_list)
+    assert len(b_rst_list) == len(g_rst_list)
+
+    rgb_color_list = [(r_rst_list[i], g_rst_list[i], b_rst_list[i]) for i in range(len(r_rst_list))]
+    return rgb_color_list
+
+
+#############################################
+# Utils for testing
+#############################################
+
+def save_seq_data(save_root, save_filename, strokes_data, init_cursors, image_size, round_length, init_width):
+    seq_save_root = os.path.join(save_root, 'seq_data')
+    os.makedirs(seq_save_root, exist_ok=True)
+    save_npz_path = os.path.join(seq_save_root, save_filename + '.npz')
+    np.savez(save_npz_path, strokes_data=strokes_data, init_cursors=init_cursors,
+             image_size=image_size, round_length=round_length, init_width=init_width)
+
+
+def image_pasting_v3_testing(patch_image, cursor, image_size, window_size_f, pasting_func, sess):
+    """
+    :param patch_image: (raster_size, raster_size), [0.0-BG, 1.0-stroke]
+    :param cursor: (2), in size [0.0, 1.0)
+    :param window_size_f: (), float32, [0.0, image_size)
+    :return: (image_size, image_size), [0.0-BG, 1.0-stroke]
+    """
+    cursor_pos = cursor * float(image_size)
+    pasted_image = sess.run(pasting_func.pasted_image,
+                            feed_dict={pasting_func.patch_canvas: np.expand_dims(patch_image, axis=-1),
+                                       pasting_func.cursor_pos_a: cursor_pos,
+                                       pasting_func.image_size_a: image_size,
+                                       pasting_func.window_size_a: window_size_f})
+    # (image_size, image_size, 1), [0.0-BG, 1.0-stroke]
+    pasted_image = pasted_image[:, :, 0]
+    return pasted_image
+
+
+def draw_strokes(data, save_root, save_filename, input_img, image_size, init_cursor, infer_lengths, init_width,
+                 cursor_type, raster_size, min_window_size,
+                 sess,
+                 pasting_func=None,
+                 save_seq=False, draw_order=False):
+    """
+    :param data: (N_strokes, 9): flag, x1, y1, x2, y2, r2, s2
+    :return:
+    """
+    canvas = np.zeros((image_size, image_size), dtype=np.float32)  # [0.0-BG, 1.0-stroke]
+    canvas_color = np.zeros((image_size, image_size, 3), dtype=np.float32)
+    canvas_color_with_moving = np.zeros((image_size, image_size, 3), dtype=np.float32)
+    frames = []
+
+    cursor_idx = 0
+
+    stroke_count = len(data)
+    color_rgb_set = get_colors(stroke_count)  # list of (3,) in [0, 255]
+    color_idx = 0
+
+    for round_idx in range(len(infer_lengths)):
+        round_length = infer_lengths[round_idx]
+
+        cursor_pos = init_cursor[cursor_idx]  # (2)
+        cursor_idx += 1
+
+        prev_width = init_width
+        prev_scaling = 1.0
+        prev_window_size = raster_size  # (1)
+
+        for round_inner_i in range(round_length):
+            stroke_idx = np.sum(infer_lengths[:round_idx]).astype(np.int32) + round_inner_i
+
+            curr_window_size = prev_scaling * prev_window_size
+            curr_window_size = np.maximum(curr_window_size, min_window_size)
+            curr_window_size = np.minimum(curr_window_size, image_size)
+
+            pen_state = data[stroke_idx, 0]
+            stroke_params = data[stroke_idx, 1:]  # (8)
+
+            x1y1, x2y2, width2, scaling2 = stroke_params[0:2], stroke_params[2:4], stroke_params[4], stroke_params[5]
+            x0y0 = np.zeros_like(x2y2)  # (2), [-1.0, 1.0]
+            x0y0 = np.divide(np.add(x0y0, 1.0), 2.0)  # (2), [0.0, 1.0]
+            x2y2 = np.divide(np.add(x2y2, 1.0), 2.0)  # (2), [0.0, 1.0]
+            widths = np.stack([prev_width, width2], axis=0)  # (2)
+            stroke_params_proc = np.concatenate([x0y0, x1y1, x2y2, widths], axis=-1)  # (8)
+
+            next_width = stroke_params[4]
+            next_scaling = stroke_params[5]
+            next_window_size = next_scaling * curr_window_size
+            next_window_size = np.maximum(next_window_size, min_window_size)
+            next_window_size = np.minimum(next_window_size, image_size)
+
+            prev_width = next_width * curr_window_size / next_window_size
+            prev_scaling = next_scaling
+            prev_window_size = curr_window_size
+
+            f = stroke_params_proc.tolist()  # (8)
+            f += [1.0, 1.0]
+            gt_stroke_img, _ = draw(f)  # (raster_size, raster_size), [0.0-stroke, 1.0-BG]
+            gt_stroke_img_large = image_pasting_v3_testing(1.0 - gt_stroke_img, cursor_pos, image_size,
+                                                           curr_window_size,
+                                                           pasting_func, sess)  # [0.0-BG, 1.0-stroke]
+
+            if pen_state == 0:
+                canvas += gt_stroke_img_large  # [0.0-BG, 1.0-stroke]
+
+            if draw_order:
+                color_rgb = color_rgb_set[color_idx]  # (3) in [0, 255]
+                color_idx += 1
+
+                color_rgb = np.reshape(color_rgb, (1, 1, 3)).astype(np.float32)
+                color_stroke = np.expand_dims(gt_stroke_img_large, axis=-1) * (1.0 - color_rgb / 255.0)
+                canvas_color_with_moving = canvas_color_with_moving * np.expand_dims((1.0 - gt_stroke_img_large),
+                                                                                     axis=-1) + color_stroke  # (H, W, 3)
+
+                if pen_state == 0:
+                    canvas_color = canvas_color * np.expand_dims((1.0 - gt_stroke_img_large),
+                                                                 axis=-1) + color_stroke  # (H, W, 3)
+
+            # update cursor_pos based on hps.cursor_type
+            new_cursor_offsets = stroke_params[2:4] * (curr_window_size / 2.0)  # (1, 6), patch-level
+            new_cursor_offset_next = new_cursor_offsets
+
+            # important!!!
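+            # the concatenate below swaps the two offset components so that the
+            # predicted (x, y) offset lines up with the (y, x) order used for
+            # cursor_pos on the canvas (draw() above also plots points as (y, x))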
+            new_cursor_offset_next = np.concatenate([new_cursor_offset_next[1:2], new_cursor_offset_next[0:1]], axis=-1)
+
+            cursor_pos_large = cursor_pos * float(image_size)
+
+            stroke_position_next = cursor_pos_large + new_cursor_offset_next  # (2), large-level
+
+            if cursor_type == 'next':
+                cursor_pos_large = stroke_position_next  # (2), large-level
+            else:
+                raise Exception('Unknown cursor_type')
+
+            cursor_pos_large = np.minimum(np.maximum(cursor_pos_large, 0.0), float(image_size - 1))  # (2), large-level
+            cursor_pos = cursor_pos_large / float(image_size)
+
+            frames.append(canvas.copy())
+
+    canvas = np.clip(canvas, 0.0, 1.0)
+    canvas = np.round((1.0 - canvas) * 255.0).astype(np.uint8)  # [0-stroke, 255-BG]
+
+    os.makedirs(save_root, exist_ok=True)
+    save_path = os.path.join(save_root, save_filename)
+    canvas_img = Image.fromarray(canvas, 'L')
+    canvas_img.save(save_path, 'PNG')
+
+    if save_seq:
+        seq_save_root = os.path.join(save_root, 'seq', save_filename[:-4])
+        os.makedirs(seq_save_root, exist_ok=True)
+        for len_i in range(len(frames)):
+            frame = frames[len_i]
+            frame = np.round((1.0 - frame) * 255.0).astype(np.uint8)
+            save_path = os.path.join(seq_save_root, str(len_i) + '.png')
+            frame_img = Image.fromarray(frame, 'L')
+            frame_img.save(save_path, 'PNG')
+
+    if draw_order:
+        order_save_root = os.path.join(save_root, 'order')
+        # order_comp_save_root = os.path.join(save_root, 'order-compare')
+        os.makedirs(order_save_root, exist_ok=True)
+        # os.makedirs(order_comp_save_root, exist_ok=True)
+
+        canvas_color = 255 - np.round(canvas_color * 255.0).astype(np.uint8)
+        canvas_color_img = Image.fromarray(canvas_color, 'RGB')
+        save_path = os.path.join(order_save_root, save_filename)
+        canvas_color_img.save(save_path, 'PNG')
+
+        canvas_color_with_moving = 255 - np.round(canvas_color_with_moving * 255.0).astype(np.uint8)
+
+        # comparisons
+        # rows = 2
+        # cols = 3
+        # plt.figure(figsize=(5 * cols, 5 * rows))
+
+        # plt.subplot(rows, cols, 1)
+        # plt.title('Input', fontsize=12)
+        # # plt.axis('off')
+        # input_rgb = input_img
+        # plt.imshow(input_rgb)
+
+        # # plt.subplot(rows, cols, 2)
+        # # plt.title('GT', fontsize=12)
+        # # # plt.axis('off')
+        # # gt_rgb = np.stack([gt_img for _ in range(3)], axis=2)
+        # # plt.imshow(gt_rgb)
+
+        # plt.subplot(rows, cols, 2)
+        # plt.title('Sketch', fontsize=12)
+        # # plt.axis('off')
+        # canvas_rgb = np.stack([canvas for _ in range(3)], axis=2)
+        # plt.imshow(canvas_rgb)
+
+        # plt.subplot(rows, cols, 4)
+        # plt.title('Sketch Order', fontsize=12)
+        # # plt.axis('off')
+        # plt.imshow(canvas_color)
+
+        # plt.subplot(rows, cols, 5)
+        # plt.title('Sketch Order with moving', fontsize=12)
+        # # plt.axis('off')
+        # plt.imshow(canvas_color_with_moving)
+
+        # plt.subplot(rows, cols, 6)
+        # plt.title('Order', fontsize=12)
+        # plt.axis('off')
+
+        # img_h = 5
+        # img_w = 10
+        # color_array = np.zeros([len(color_rgb_set) * img_h, img_w, 3], dtype=np.uint8)
+        # for i in range(len(color_rgb_set)):
+        #     color_array[i * img_h: i * img_h + img_h, :, :] = color_rgb_set[i]
+
+        # plt.imshow(color_array)
+
+        # comp_save_path = os.path.join(order_comp_save_root, save_filename)
+        # plt.savefig(comp_save_path)
+        # plt.close()
+        # plt.show()
+
+
+def update_hyperparams(model_params, model_base_dir, model_name, infer_dataset):
+    with tf.io.gfile.GFile(os.path.join(model_base_dir, model_name, 'model_config.json'), 'r') as f:
+        data = json.load(f)
+
+    ignored_keys = ['image_size_small', 'image_size_large', 'z_size', 'raster_perc_loss_layer', 'raster_loss_wk',
+                    'decreasing_sn', 'raster_loss_weight']
+
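+    # every hyperparameter known to the model must appear in model_config.json,
+    # unless it is explicitly listed in ignored_keys above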
+    for name in model_params._hparam_types.keys():
+        if name not in data and name not in ignored_keys:
+            raise Exception(name, 'not in model_config.json')
+
+    assert data['resize_method'] == 'AREA'
+    data['data_set'] = infer_dataset
+    fix_list = ['use_input_dropout', 'use_output_dropout', 'use_recurrent_dropout']
+    for fix in fix_list:
+        data[fix] = (data[fix] == 1)
+
+    pop_keys = ['gpus', 'image_size', 'resolution_type', 'loop_per_gpu', 'stroke_num_loss_weight_end',
+                'perc_loss_fuse_type',
+                'early_pen_length', 'early_pen_loss_type', 'early_pen_loss_weight',
+                'increase_start_steps', 'perc_loss_layers', 'sn_loss_type', 'photo_prob_end_step',
+                'sup_weight', 'gan_weight', 'base_raster_loss_base_type']
+    for pop_key in pop_keys:
+        if pop_key in data.keys():
+            data.pop(pop_key)
+
+    model_params.parse_json(json.dumps(data))
+
+    return model_params
diff --git a/robot_painting/qmupd_vs/vgg_utils/VGG16.py b/robot_painting/qmupd_vs/vgg_utils/VGG16.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb0d76b9b25601fe6f31be9f2b91d567459a06d7
--- /dev/null
+++ b/robot_painting/qmupd_vs/vgg_utils/VGG16.py
@@ -0,0 +1,128 @@
+import tensorflow as tf
+
+
+def vgg_net(x, n_classes, img_size, reuse, is_train=True, dropout_rate=0.5):
+    # Define a scope for reusing the variables
+    with tf.variable_scope('VGG16', reuse=reuse):
+        x = tf.reshape(x, [-1, img_size, img_size, 1])
+
+        x = tf.layers.conv2d(inputs=x, filters=64, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.conv2d(inputs=x, filters=64, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#1', x.shape)
+
+        x = tf.layers.conv2d(inputs=x, filters=128, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.conv2d(inputs=x, filters=128, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#2', x.shape)
+
+        x = tf.layers.conv2d(inputs=x, filters=256, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.conv2d(inputs=x, filters=256, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.conv2d(inputs=x, filters=256, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#3', x.shape)
+
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#4', x.shape)
+
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#5', x.shape)
+
+        x_shape = x.get_shape().as_list()
+        nodes = x_shape[1] * x_shape[2] * x_shape[3]
+        x = tf.reshape(x, [-1, nodes])
+
+        x = tf.layers.dense(x, 4096, activation=tf.nn.relu)
+        if is_train:
+            x = tf.layers.dropout(x, dropout_rate)
+
+        x = tf.layers.dense(x, 4096, activation=tf.nn.relu)
+        if is_train:
+            x = tf.layers.dropout(x, dropout_rate)
+
+        out = tf.layers.dense(x, n_classes)
+        print(out)
+
+        return out
+
+
+def vgg_net_slim(x, img_size):
+    return_map = {}
+    # Define a scope for reusing the variables
+    with tf.variable_scope('VGG16', reuse=tf.AUTO_REUSE):
+        x = tf.reshape(x, [-1, img_size, img_size, 1])
+
+        x = tf.layers.conv2d(inputs=x, filters=64, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU1_1'] = x
+        x = tf.layers.conv2d(inputs=x, filters=64, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU1_2'] = x
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#1', x.shape)  # 1 (?, 64, 64, 64)
+
+        x = tf.layers.conv2d(inputs=x, filters=128, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU2_1'] = x
+        x = tf.layers.conv2d(inputs=x, filters=128, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU2_2'] = x
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#2', x.shape)  # 2 (?, 32, 32, 128)
+
+        x = tf.layers.conv2d(inputs=x, filters=256, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU3_1'] = x
+        x = tf.layers.conv2d(inputs=x, filters=256, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU3_2'] = x
+        x = tf.layers.conv2d(inputs=x, filters=256, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU3_3'] = x
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#3', x.shape)  # 3 (?, 16, 16, 256)
+
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU4_1'] = x
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU4_2'] = x
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU4_3'] = x
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#4', x.shape)  # 4 (?, 8, 8, 512)
+
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU5_1'] = x
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU5_2'] = x
+        x = tf.layers.conv2d(inputs=x, filters=512, kernel_size=[3, 3], strides=1,
+                             padding='SAME', activation=tf.nn.relu)
+        return_map['ReLU5_3'] = x
+        x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
+        print('#5', x.shape)  # 5 (?, 4, 4, 512)
+
+        return return_map
diff --git a/robot_painting/robot-sketch-vue/.env.development b/robot_painting/robot-sketch-vue/.env.development
new file mode 100644
index 0000000000000000000000000000000000000000..0e000745f54848d3cc04f21848a4b53e217dadfe
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/.env.development
@@ -0,0 +1,10 @@
+# Public path for the development environment
+VITE_PUBLIC_PATH = /
+
+# Proxy prefix for the development environment
+VITE_PROXY_DOMAIN = /api
+
+# Backend addresses for the development environment
+VITE_PROXY_DOMAIN_REAL = http://192.168.253.92:8001
+
+VITE_PROXY_DOMAIN_REAL2 = http://192.168.253.92:5000
\ No newline at end of file
diff --git a/robot_painting/robot-sketch-vue/.env.production b/robot_painting/robot-sketch-vue/.env.production
new file mode 100644
index 0000000000000000000000000000000000000000..9adbc6f53b36ce965beaf987426d96911ee77cff
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/.env.production
@@ -0,0 +1,5 @@
+# Build output path for the production environment
+VITE_PUBLIC_PATH = /production/
+
+# Backend address for the production environment
+VITE_PROXY_DOMAIN_REAL = /
\ No newline at end of file
diff --git a/robot_painting/robot-sketch-vue/.env.test b/robot_painting/robot-sketch-vue/.env.test
new file mode 100644
index 0000000000000000000000000000000000000000..75c834505283f99b64e1976a8c72c616e8fc2d44
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/.env.test
@@ -0,0 +1,5 @@
+# Build output path for the test environment
+VITE_PUBLIC_PATH = /test/
+
+# Backend address for the test environment
+VITE_PROXY_DOMAIN_REAL = /
\ No newline at end of file
diff --git a/robot_painting/robot-sketch-vue/.eslintignore b/robot_painting/robot-sketch-vue/.eslintignore
new file mode 100644
index 0000000000000000000000000000000000000000..2e5b42b4c4982ef74baeb4041196b90434bd7296
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/.eslintignore
@@ -0,0 +1,7 @@
+dist
+node_modules
+public
+src/assets
+*.d.ts
+package.json
+pnpm-lock.yaml
\ No newline at end of file
diff --git a/robot_painting/robot-sketch-vue/.eslintrc.js b/robot_painting/robot-sketch-vue/.eslintrc.js
new file mode 100644
index 0000000000000000000000000000000000000000..55bb2c61697e2a2348f9756d51ff60487683eb50
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/.eslintrc.js
@@ -0,0 +1,31 @@
+module.exports = {
+  env: {
+    browser: true,
+    es2021: true,
+    node: true
+  },
+  parser: 'vue-eslint-parser',
+  extends: [
+    'eslint:recommended',
+    'plugin:vue/vue3-essential',
+    'plugin:@typescript-eslint/recommended',
+    'plugin:prettier/recommended',
+    'eslint-config-prettier',
+    './.eslintrc-auto-import.json'
+  ],
+  parserOptions: {
+    ecmaVersion: 'latest',
+    parser: '@typescript-eslint/parser',
+    sourceType: 'module'
+  },
+  plugins: ['vue', '@typescript-eslint', 'prettier'],
+  rules: {
+    'no-console': process.env.NODE_ENV === 'production' ? 'warn' : 'off',
+    'no-debugger': process.env.NODE_ENV === 'production' ? 'warn' : 'off',
+    'vue/multi-word-component-names': 0, // disable the multi-word component name rule
+    '@typescript-eslint/no-explicit-any': 'off', // allow the any type
+    'no-else-return': 2, // an if branch that returns must not be followed by else
+    eqeqeq: 2, // strict equality (===) is required
+    "prettier/prettier": "off"
+  }
+}
diff --git a/robot_painting/robot-sketch-vue/.gitignore b/robot_painting/robot-sketch-vue/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..51e78819d3627ca2e99fa4a482d1ceb23a269f25
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/.gitignore
@@ -0,0 +1,28 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
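+
+# declaration files generated by unplugin-auto-import and unplugin-vue-components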
+.eslintrc-auto-import.json
+auto-imports.d.ts
+components.d.ts
\ No newline at end of file
diff --git a/robot_painting/robot-sketch-vue/.prettierignore b/robot_painting/robot-sketch-vue/.prettierignore
new file mode 100644
index 0000000000000000000000000000000000000000..2e5b42b4c4982ef74baeb4041196b90434bd7296
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/.prettierignore
@@ -0,0 +1,7 @@
+dist
+node_modules
+public
+src/assets
+*.d.ts
+package.json
+pnpm-lock.yaml
\ No newline at end of file
diff --git a/robot_painting/robot-sketch-vue/.prettierrc.js b/robot_painting/robot-sketch-vue/.prettierrc.js
new file mode 100644
index 0000000000000000000000000000000000000000..2dc397101edf9642304fca8fafb3c1bea48ebd0e
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/.prettierrc.js
@@ -0,0 +1,7 @@
+module.exports = {
+  singleQuote: true, // use single quotes
+  semi: false, // no semicolons at line ends
+  trailingComma: 'none', // no trailing commas
+  endOfLine: 'auto', // keep the existing line endings
+  printWidth: 100 // at most 100 characters per line
+}
diff --git a/robot_painting/robot-sketch-vue/.vscode/extensions.json b/robot_painting/robot-sketch-vue/.vscode/extensions.json
new file mode 100644
index 0000000000000000000000000000000000000000..a7cea0b0678120a1b590d1b6592c7318039b9179
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/.vscode/extensions.json
@@ -0,0 +1,3 @@
+{
+  "recommendations": ["Vue.volar"]
+}
diff --git a/robot_painting/robot-sketch-vue/README.md b/robot_painting/robot-sketch-vue/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..56ceedcd2e571db4b90fad829bbd09a1bc2a5c16
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/README.md
@@ -0,0 +1,58 @@
+# Vue3 Template
+
+## 1. Quick start
+
+### 1.1 Development environment: install globally
+
+1. node
+1. pnpm
+
+### 1.2 Recommended VSCode extensions (for vue3 + typescript + eslint)
+
+1. TypeScript Vue Plugin (Volar): Vue plugin for the TypeScript server
+2. Vue Language Features (Volar): Vue 3 syntax support
+3. Vue 3 Snippets
+4. ESLint
+5. Prettier Formatter for Visual Studio Code
+
+### 1.3 Local development
+
+Install dependencies:
+
+```
+pnpm i
+```
+
+Start the project:
+
+```
+pnpm dev
+```
+
+# Backend startup
+## Image background removal
+Project URL: https://gitlab.rosc.org.cn/it/image-matting
+Commands to run on the 1.49 host:
+```
+conda activate robot-sketch
+
+python3 app.py
+```
+
+## Stylization
+Project URL: https://gitlab.rosc.org.cn/QingfengLi/qmupd_vs
+Commands to run on the 1.49 host:
+
+```
+conda activate vsketch
+export FLASK_APP=main
+flask run --host=0.0.0.0
+```
+
+## The browser cannot access the camera
+Reference: https://blog.csdn.net/baidu_31788709/article/details/125652048
+
+
+
+
\ No newline at end of file
diff --git a/robot_painting/robot-sketch-vue/config.ts b/robot_painting/robot-sketch-vue/config.ts
new file mode 100644
index 0000000000000000000000000000000000000000..8ef6bc2420a18d713b2398b0129cc7712052b4f8
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/config.ts
@@ -0,0 +1,3 @@
+export default {
+  PROJECT_NAME: 'vue3-project-template' // project name
+}
diff --git a/robot_painting/robot-sketch-vue/index.html b/robot_painting/robot-sketch-vue/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..4d26006626d9b9af54900d7ad63b8ee77b185be9
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/index.html
@@ -0,0 +1,13 @@
+
+
+
+
+
+    Vue3+Element Template
+
+
+ + + diff --git a/robot_painting/robot-sketch-vue/package.json b/robot_painting/robot-sketch-vue/package.json new file mode 100644 index 0000000000000000000000000000000000000000..c6a9ea1ffb9089b333ba3faed544996fc0061eeb --- /dev/null +++ b/robot_painting/robot-sketch-vue/package.json @@ -0,0 +1,49 @@ +{ + "name": "vue3-element-template", + "version": "1.0.0", + "private": true, + "scripts": { + "dev": "vite", + "build": "vue-tsc --noEmit && vite build", + "preview": "vite preview", + "build:test": "vue-tsc --noEmit && vite build --mode test", + "preview:test": "vite preview --mode test", + "preinstall": "npx only-allow pnpm" + }, + "dependencies": { + "@vueuse/core": "^8.9.4", + "axios": "^0.27.2", + "element-plus": "^2.7.2", + "nprogress": "^0.2.0", + "pinia": "^2.0.14", + "simple-vue-camera": "^1.1.3", + "vue": "^3.2.25", + "vue-camera-lib": "^1.0.4", + "vue-i18n": "^9.1.10", + "vue-router": "^4.0.16" + }, + "devDependencies": { + "@iconify-json/ep": "^1.1.6", + "@types/node": "^18.0.0", + "@types/nprogress": "^0.2.0", + "@typescript-eslint/eslint-plugin": "^5.30.0", + "@typescript-eslint/parser": "^5.30.0", + "@vitejs/plugin-vue": "^2.3.3", + "consola": "^2.15.3", + "eslint": "^8.18.0", + "eslint-config-prettier": "^8.5.0", + "eslint-plugin-prettier": "^4.2.1", + "eslint-plugin-vue": "^9.1.1", + "prettier": "^2.7.1", + "sass": "^1.53.0", + "typescript": "^4.5.4", + "unplugin-auto-import": "^0.9.2", + "unplugin-icons": "^0.14.7", + "unplugin-vue-components": "^0.21.1", + "vite": "^2.9.9", + "vite-plugin-eslint": "^1.6.1", + "vue-tsc": "^0.34.7" + }, + "description": "", + "author": "duhuan" +} diff --git a/robot_painting/robot-sketch-vue/pnpm-lock.yaml b/robot_painting/robot-sketch-vue/pnpm-lock.yaml new file mode 100644 index 0000000000000000000000000000000000000000..33c44b8eb9274eaa598f102bba9b58e521ed8242 --- /dev/null +++ b/robot_painting/robot-sketch-vue/pnpm-lock.yaml @@ -0,0 +1,2765 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + '@vueuse/core': + specifier: ^8.9.4 + version: 8.9.4(vue@3.2.45) + axios: + specifier: ^0.27.2 + version: 0.27.2 + element-plus: + specifier: ^2.7.2 + version: 2.7.2(vue@3.2.45) + nprogress: + specifier: ^0.2.0 + version: 0.2.0 + pinia: + specifier: ^2.0.14 + version: 2.0.28(typescript@4.9.4)(vue@3.2.45) + simple-vue-camera: + specifier: ^1.1.3 + version: 1.1.3 + vue: + specifier: ^3.2.25 + version: 3.2.45 + vue-camera-lib: + specifier: ^1.0.4 + version: 1.0.4 + vue-i18n: + specifier: ^9.1.10 + version: 9.2.2(vue@3.2.45) + vue-router: + specifier: ^4.0.16 + version: 4.1.6(vue@3.2.45) + devDependencies: + '@iconify-json/ep': + specifier: ^1.1.6 + version: 1.1.8 + '@types/node': + specifier: ^18.0.0 + version: 18.11.18 + '@types/nprogress': + specifier: ^0.2.0 + version: 0.2.0 + '@typescript-eslint/eslint-plugin': + specifier: ^5.30.0 + version: 5.48.1(@typescript-eslint/parser@5.48.1(eslint@8.31.0)(typescript@4.9.4))(eslint@8.31.0)(typescript@4.9.4) + '@typescript-eslint/parser': + specifier: ^5.30.0 + version: 5.48.1(eslint@8.31.0)(typescript@4.9.4) + '@vitejs/plugin-vue': + specifier: ^2.3.3 + version: 2.3.4(vite@2.9.15(sass@1.57.1))(vue@3.2.45) + consola: + specifier: ^2.15.3 + version: 2.15.3 + eslint: + specifier: ^8.18.0 + version: 8.31.0 + eslint-config-prettier: + specifier: ^8.5.0 + version: 8.6.0(eslint@8.31.0) + eslint-plugin-prettier: + specifier: ^4.2.1 + version: 4.2.1(eslint-config-prettier@8.6.0(eslint@8.31.0))(eslint@8.31.0)(prettier@2.8.2) + 
eslint-plugin-vue: + specifier: ^9.1.1 + version: 9.8.0(eslint@8.31.0) + prettier: + specifier: ^2.7.1 + version: 2.8.2 + sass: + specifier: ^1.53.0 + version: 1.57.1 + typescript: + specifier: ^4.5.4 + version: 4.9.4 + unplugin-auto-import: + specifier: ^0.9.2 + version: 0.9.5(@vueuse/core@8.9.4(vue@3.2.45))(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1)) + unplugin-icons: + specifier: ^0.14.7 + version: 0.14.15(@vue/compiler-sfc@3.2.45) + unplugin-vue-components: + specifier: ^0.21.1 + version: 0.21.2(@babel/parser@7.20.7)(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1))(vue@3.2.45) + vite: + specifier: ^2.9.9 + version: 2.9.15(sass@1.57.1) + vite-plugin-eslint: + specifier: ^1.6.1 + version: 1.8.1(eslint@8.31.0)(vite@2.9.15(sass@1.57.1)) + vue-tsc: + specifier: ^0.34.7 + version: 0.34.17(typescript@4.9.4) + +packages: + + '@antfu/install-pkg@0.1.1': + resolution: {integrity: sha512-LyB/8+bSfa0DFGC06zpCEfs89/XoWZwws5ygEa5D+Xsm3OfI+aXQ86VgVG7Acyef+rSZ5HE7J8rrxzrQeM3PjQ==} + + '@antfu/utils@0.5.2': + resolution: {integrity: sha512-CQkeV+oJxUazwjlHD0/3ZD08QWKuGQkhnrKo3e6ly5pd48VUpXbb77q0xMU4+vc2CkJnDS02Eq/M9ugyX20XZA==} + + '@antfu/utils@0.7.2': + resolution: {integrity: sha512-vy9fM3pIxZmX07dL+VX1aZe7ynZ+YyB0jY+jE6r3hOK6GNY2t6W8rzpFC4tgpbXUYABkFQwgJq2XYXlxbXAI0g==} + + '@babel/helper-string-parser@7.24.1': + resolution: {integrity: sha512-2ofRCjnnA9y+wk8b9IAREroeUP02KHp431N2mhKniy2yKIDKpbrHv9eXwm8cBeWQYcJmzv5qKCu65P47eCF7CQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.22.20': + resolution: {integrity: sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.20.7': + resolution: {integrity: sha512-T3Z9oHybU+0vZlY9CiDSJQTD5ZapcW18ZctFMi0MOAl/4BjFF4ul7NVSARLdbGO5vDqy9eQiGTV0LtKfvCYvcg==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/types@7.24.0': + resolution: {integrity: sha512-+j7a5c253RfKh8iABBhywc8NSfP5LURe7Uh4qpsh6jc+aLJguvmIUBdjSdEMQv2bENrCR5MfRdjGo7vzS/ob7w==} + engines: {node: '>=6.9.0'} + + '@ctrl/tinycolor@3.6.1': + resolution: {integrity: sha512-SITSV6aIXsuVNV3f3O0f2n/cgyEDWoSqtZMYiAmcsYHydcKrOz3gUxB/iXd/Qf08+IZX4KpgNbvUdMBmWz+kcA==} + engines: {node: '>=10'} + + '@element-plus/icons-vue@2.3.1': + resolution: {integrity: sha512-XxVUZv48RZAd87ucGS48jPf6pKu0yV5UCg9f4FFwtrYxXOwWuVJo6wOvSLKEoMQKjv8GsX/mhP6UsC1lRwbUWg==} + peerDependencies: + vue: ^3.2.0 + + '@esbuild/linux-loong64@0.14.54': + resolution: {integrity: sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + + '@eslint/eslintrc@1.4.1': + resolution: {integrity: sha512-XXrH9Uarn0stsyldqDYq8r++mROmWRI1xKMXa640Bb//SY1+ECYX6VzT6Lcx5frD0V30XieqJ0oX9I2Xj5aoMA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@floating-ui/core@1.6.0': + resolution: {integrity: sha512-PcF++MykgmTj3CIyOQbKA/hDzOAiqI3mhuoN44WRCopIs1sgoDoU4oty4Jtqaj/y3oDU6fnVSm4QG0a3t5i0+g==} + + '@floating-ui/dom@1.6.3': + resolution: {integrity: sha512-RnDthu3mzPlQ31Ss/BTwQ1zjzIhr3lk1gZB1OC56h/1vEtaXkESrOqL5fQVMfXpwGtRwX+YsZBdyHtJMQnkArw==} + + '@floating-ui/utils@0.2.1': + resolution: {integrity: sha512-9TANp6GPoMtYzQdt54kfAyMmz1+osLlXdg2ENroU7zzrtflTLrrC/lgrIfaSe+Wu0b89GKccT7vxXA0MoAIO+Q==} + + '@humanwhocodes/config-array@0.11.8': + resolution: {integrity: sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==} + engines: {node: '>=10.10.0'} + + 
'@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/object-schema@1.2.1': + resolution: {integrity: sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==} + + '@iconify-json/ep@1.1.8': + resolution: {integrity: sha512-pHCrsWU1R9/pTDU+Fps4+mjqOQFLtpGdXWegkhQ1P1DlgQAlCPyICtl6E1s8b7VwJMeZXaK84HA02UF6WD0o/Q==} + + '@iconify/types@2.0.0': + resolution: {integrity: sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==} + + '@iconify/utils@2.0.11': + resolution: {integrity: sha512-oEBZkHpkgySHcMZi80ycoJ+ZdLoJhmbN0+gUK8qNPY79ndus8fBHKPyjjwz5kIvr5U9yiym+xBTTnZOB3m5/Pw==} + + '@intlify/core-base@9.2.2': + resolution: {integrity: sha512-JjUpQtNfn+joMbrXvpR4hTF8iJQ2sEFzzK3KIESOx+f+uwIjgw20igOyaIdhfsVVBCds8ZM64MoeNSx+PHQMkA==} + engines: {node: '>= 14'} + + '@intlify/devtools-if@9.2.2': + resolution: {integrity: sha512-4ttr/FNO29w+kBbU7HZ/U0Lzuh2cRDhP8UlWOtV9ERcjHzuyXVZmjyleESK6eVP60tGC9QtQW9yZE+JeRhDHkg==} + engines: {node: '>= 14'} + + '@intlify/message-compiler@9.2.2': + resolution: {integrity: sha512-IUrQW7byAKN2fMBe8z6sK6riG1pue95e5jfokn8hA5Q3Bqy4MBJ5lJAofUsawQJYHeoPJ7svMDyBaVJ4d0GTtA==} + engines: {node: '>= 14'} + + '@intlify/shared@9.2.2': + resolution: {integrity: sha512-wRwTpsslgZS5HNyM7uDQYZtxnbI12aGiBZURX3BTR9RFIKKRWpllTsgzHWvj3HKm3Y2Sh5LPC1r0PDCKEhVn9Q==} + engines: {node: '>= 14'} + + '@intlify/vue-devtools@9.2.2': + resolution: {integrity: sha512-+dUyqyCHWHb/UcvY1MlIpO87munedm3Gn6E9WWYdWrMuYLcoIoOEVDWSS8xSwtlPU+kA+MEQTP6Q1iI/ocusJg==} + engines: {node: '>= 14'} + + '@nodelib/fs.scandir@2.1.5': + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + + '@nodelib/fs.stat@2.0.5': + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + '@nodelib/fs.walk@1.2.8': + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + + '@rollup/pluginutils@4.2.1': + resolution: {integrity: sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ==} + engines: {node: '>= 8.0.0'} + + '@sxzz/popperjs-es@2.11.7': + resolution: {integrity: sha512-Ccy0NlLkzr0Ex2FKvh2X+OyERHXJ88XJ1MXtsI9y9fGexlaXaVTPzBCRBwIxFkORuOb+uBqeu+RqnpgYTEZRUQ==} + + '@types/eslint@8.4.10': + resolution: {integrity: sha512-Sl/HOqN8NKPmhWo2VBEPm0nvHnu2LL3v9vKo8MEq0EtbJ4eVzGPl41VNPvn5E1i5poMk4/XD8UriLHpJvEP/Nw==} + + '@types/estree@1.0.0': + resolution: {integrity: sha512-WulqXMDUTYAXCjZnk6JtIHPigp55cVtDgDrO2gHRwhyJto21+1zbVCtOYB2L1F9w4qCQ0rOGWBnBe0FNTiEJIQ==} + + '@types/json-schema@7.0.11': + resolution: {integrity: sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==} + + '@types/lodash-es@4.17.12': + resolution: {integrity: sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==} + + '@types/lodash@4.17.0': + resolution: {integrity: sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==} + + '@types/node@18.11.18': + resolution: {integrity: sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA==} + + 
'@types/nprogress@0.2.0': + resolution: {integrity: sha512-1cYJrqq9GezNFPsWTZpFut/d4CjpZqA0vhqDUPFWYKF1oIyBz5qnoYMzR+0C/T96t3ebLAC1SSnwrVOm5/j74A==} + + '@types/semver@7.3.13': + resolution: {integrity: sha512-21cFJr9z3g5dW8B0CVI9g2O9beqaThGQ6ZFBqHfwhzLDKUxaqTIy3vnfah/UPkfOiF2pLq+tGz+W8RyCskuslw==} + + '@types/web-bluetooth@0.0.14': + resolution: {integrity: sha512-5d2RhCard1nQUC3aHcq/gHzWYO6K0WJmAbjO7mQJgCQKtZpgXxv1rOM6O/dBDhDYYVutk1sciOgNSe+5YyfM8A==} + + '@types/web-bluetooth@0.0.16': + resolution: {integrity: sha512-oh8q2Zc32S6gd/j50GowEjKLoOVOwHP/bWVjKJInBwQqdOYMdPrf1oVlelTlyfFK3CKxL1uahMDAr+vy8T7yMQ==} + + '@typescript-eslint/eslint-plugin@5.48.1': + resolution: {integrity: sha512-9nY5K1Rp2ppmpb9s9S2aBiF3xo5uExCehMDmYmmFqqyxgenbHJ3qbarcLt4ITgaD6r/2ypdlcFRdcuVPnks+fQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + '@typescript-eslint/parser': ^5.0.0 + eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/parser@5.48.1': + resolution: {integrity: sha512-4yg+FJR/V1M9Xoq56SF9Iygqm+r5LMXvheo6DQ7/yUWynQ4YfCRnsKuRgqH4EQ5Ya76rVwlEpw4Xu+TgWQUcdA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/scope-manager@5.48.1': + resolution: {integrity: sha512-S035ueRrbxRMKvSTv9vJKIWgr86BD8s3RqoRZmsSh/s8HhIs90g6UlK8ZabUSjUZQkhVxt7nmZ63VJ9dcZhtDQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@typescript-eslint/type-utils@5.48.1': + resolution: {integrity: sha512-Hyr8HU8Alcuva1ppmqSYtM/Gp0q4JOp1F+/JH5D1IZm/bUBrV0edoewQZiEc1r6I8L4JL21broddxK8HAcZiqQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: '*' + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/types@5.48.1': + resolution: {integrity: sha512-xHyDLU6MSuEEdIlzrrAerCGS3T7AA/L8Hggd0RCYBi0w3JMvGYxlLlXHeg50JI9Tfg5MrtsfuNxbS/3zF1/ATg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@typescript-eslint/typescript-estree@5.48.1': + resolution: {integrity: sha512-Hut+Osk5FYr+sgFh8J/FHjqX6HFcDzTlWLrFqGoK5kVUN3VBHF/QzZmAsIXCQ8T/W9nQNBTqalxi1P3LSqWnRA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/utils@5.48.1': + resolution: {integrity: sha512-SmQuSrCGUOdmGMwivW14Z0Lj8dxG1mOFZ7soeJ0TQZEJcs3n5Ndgkg0A4bcMFzBELqLJ6GTHnEU+iIoaD6hFGA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 + + '@typescript-eslint/visitor-keys@5.48.1': + resolution: {integrity: sha512-Ns0XBwmfuX7ZknznfXozgnydyR8F6ev/KEGePP4i74uL3ArsKbEhJ7raeKr1JSa997DBDwol/4a0Y+At82c9dA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@vitejs/plugin-vue@2.3.4': + resolution: {integrity: sha512-IfFNbtkbIm36O9KB8QodlwwYvTEsJb4Lll4c2IwB3VHc2gie2mSPtSzL0eYay7X2jd/2WX02FjSGTWR6OPr/zg==} + engines: {node: '>=12.0.0'} + peerDependencies: + vite: ^2.5.10 + vue: ^3.2.25 + + '@volar/code-gen@0.34.17': + resolution: {integrity: sha512-rHR7BA71BJ/4S7xUOPMPiB7uk6iU9oTWpEMZxFi5VGC9iJmDncE82WzU5iYpcbOBCVHsOjMh0+5CGMgdO6SaPA==} + + '@volar/source-map@0.34.17': + resolution: {integrity: sha512-3yn1IMXJGGWB/G817/VFlFMi8oh5pmE7VzUqvgMZMrppaZpKj6/juvJIEiXNxRsgWc0RxIO8OSp4htdPUg1Raw==} + + '@volar/vue-code-gen@0.34.17': + resolution: {integrity: 
sha512-17pzcK29fyFWUc+C82J3JYSnA+jy3QNrIldb9kPaP9Itbik05ZjEIyEue9FjhgIAuHeYSn4LDM5s6nGjxyfhsQ==} + + '@volar/vue-typescript@0.34.17': + resolution: {integrity: sha512-U0YSVIBPRWVPmgJHNa4nrfq88+oS+tmyZNxmnfajIw9A/GOGZQiKXHC0k09SVvbYXlsjgJ6NIjhm9NuAhGRQjg==} + + '@vue/compiler-core@3.2.45': + resolution: {integrity: sha512-rcMj7H+PYe5wBV3iYeUgbCglC+pbpN8hBLTJvRiK2eKQiWqu+fG9F+8sW99JdL4LQi7Re178UOxn09puSXvn4A==} + + '@vue/compiler-dom@3.2.45': + resolution: {integrity: sha512-tyYeUEuKqqZO137WrZkpwfPCdiiIeXYCcJ8L4gWz9vqaxzIQRccTSwSWZ/Axx5YR2z+LvpUbmPNXxuBU45lyRw==} + + '@vue/compiler-sfc@3.2.45': + resolution: {integrity: sha512-1jXDuWah1ggsnSAOGsec8cFjT/K6TMZ0sPL3o3d84Ft2AYZi2jWJgRMjw4iaK0rBfA89L5gw427H4n1RZQBu6Q==} + + '@vue/compiler-ssr@3.2.45': + resolution: {integrity: sha512-6BRaggEGqhWht3lt24CrIbQSRD5O07MTmd+LjAn5fJj568+R9eUD2F7wMQJjX859seSlrYog7sUtrZSd7feqrQ==} + + '@vue/devtools-api@6.4.5': + resolution: {integrity: sha512-JD5fcdIuFxU4fQyXUu3w2KpAJHzTVdN+p4iOX2lMWSHMOoQdMAcpFLZzm9Z/2nmsoZ1a96QEhZ26e50xLBsgOQ==} + + '@vue/reactivity-transform@3.2.45': + resolution: {integrity: sha512-BHVmzYAvM7vcU5WmuYqXpwaBHjsS8T63jlKGWVtHxAHIoMIlmaMyurUSEs1Zcg46M4AYT5MtB1U274/2aNzjJQ==} + + '@vue/reactivity@3.2.45': + resolution: {integrity: sha512-PRvhCcQcyEVohW0P8iQ7HDcIOXRjZfAsOds3N99X/Dzewy8TVhTCT4uXpAHfoKjVTJRA0O0K+6QNkDIZAxNi3A==} + + '@vue/runtime-core@3.2.45': + resolution: {integrity: sha512-gzJiTA3f74cgARptqzYswmoQx0fIA+gGYBfokYVhF8YSXjWTUA2SngRzZRku2HbGbjzB6LBYSbKGIaK8IW+s0A==} + + '@vue/runtime-dom@3.2.45': + resolution: {integrity: sha512-cy88YpfP5Ue2bDBbj75Cb4bIEZUMM/mAkDMfqDTpUYVgTf/kuQ2VQ8LebuZ8k6EudgH8pYhsGWHlY0lcxlvTwA==} + + '@vue/server-renderer@3.2.45': + resolution: {integrity: sha512-ebiMq7q24WBU1D6uhPK//2OTR1iRIyxjF5iVq/1a5I1SDMDyDu4Ts6fJaMnjrvD3MqnaiFkKQj+LKAgz5WIK3g==} + peerDependencies: + vue: 3.2.45 + + '@vue/shared@3.2.45': + resolution: {integrity: sha512-Ewzq5Yhimg7pSztDV+RH1UDKBzmtqieXQlpTVm2AwraoRL/Rks96mvd8Vgi7Lj+h+TH8dv7mXD3FRZR3TUvbSg==} + + '@vueuse/core@8.9.4': + resolution: {integrity: sha512-B/Mdj9TK1peFyWaPof+Zf/mP9XuGAngaJZBwPaXBvU3aCTZlx3ltlrFFFyMV4iGBwsjSCeUCgZrtkEj9dS2Y3Q==} + peerDependencies: + '@vue/composition-api': ^1.1.0 + vue: ^2.6.0 || ^3.2.0 + peerDependenciesMeta: + '@vue/composition-api': + optional: true + vue: + optional: true + + '@vueuse/core@9.13.0': + resolution: {integrity: sha512-pujnclbeHWxxPRqXWmdkKV5OX4Wk4YeK7wusHqRwU0Q7EFusHoqNA/aPhB6KCh9hEqJkLAJo7bb0Lh9b+OIVzw==} + + '@vueuse/metadata@8.9.4': + resolution: {integrity: sha512-IwSfzH80bnJMzqhaapqJl9JRIiyQU0zsRGEgnxN6jhq7992cPUJIRfV+JHRIZXjYqbwt07E1gTEp0R0zPJ1aqw==} + + '@vueuse/metadata@9.13.0': + resolution: {integrity: sha512-gdU7TKNAUVlXXLbaF+ZCfte8BjRJQWPCa2J55+7/h+yDtzw3vOoGQDRXzI6pyKyo6bXFT5/QoPE4hAknExjRLQ==} + + '@vueuse/shared@8.9.4': + resolution: {integrity: sha512-wt+T30c4K6dGRMVqPddexEVLa28YwxW5OFIPmzUHICjphfAuBFTTdDoyqREZNDOFJZ44ARH1WWQNCUK8koJ+Ag==} + peerDependencies: + '@vue/composition-api': ^1.1.0 + vue: ^2.6.0 || ^3.2.0 + peerDependenciesMeta: + '@vue/composition-api': + optional: true + vue: + optional: true + + '@vueuse/shared@9.13.0': + resolution: {integrity: sha512-UrnhU+Cnufu4S6JLCPZnkWh0WwZGUp72ktOF2DFptMlOs3TOdVv8xJN53zhHGARmVOsz5KqOls09+J1NR6sBKw==} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.8.1: + resolution: {integrity: 
sha512-7zFpHzhnqYKrkYdUjF1HI1bzd0VygEGX8lFk4k5zVMqHEoES+P+7TKI+EvLO9WVMJ8eekdO0aDEK044xTXwPPA==} + engines: {node: '>=0.4.0'} + hasBin: true + + ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + array-union@2.1.0: + resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} + engines: {node: '>=8'} + + async-validator@4.2.5: + resolution: {integrity: sha512-7HhHjtERjqlNbZtqNqy2rckN/SpOOlmDliet+lP7k+eKZEjPk3DgyeU9lIXLdeLz0uBbbVp+9Qdow9wJWgwwfg==} + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + axios@0.27.2: + resolution: {integrity: sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==} + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + binary-extensions@2.2.0: + resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==} + engines: {node: '>=8'} + + boolbase@1.0.0: + resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + + brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + + brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + + braces@3.0.2: + resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} + engines: {node: '>=8'} + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + chokidar@3.5.3: + resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} + engines: {node: '>= 8.10.0'} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + concat-map@0.0.1: + resolution: {integrity: 
sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + consola@2.15.3: + resolution: {integrity: sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==} + + cross-spawn@7.0.3: + resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + engines: {node: '>= 8'} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + csstype@2.6.21: + resolution: {integrity: sha512-Z1PhmomIfypOpoMjRQB70jfvy/wxT50qW08YXO5lMIJkrdq4yOTR+AW7FqutScmB9NkLwxo+jU+kZLbofZZq/w==} + + dayjs@1.11.10: + resolution: {integrity: sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ==} + + debug@4.3.4: + resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + deviceorientation-js@1.0.0: + resolution: {integrity: sha512-2TnjRD0TW/HYOdOxy13WHLv1J8UDU3Xa1fGhdE9pAq8Os6lbuN7H1ZyWyFzKXbT4nGfOUKMASUO/vuF8IJmxOg==} + + dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + + doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + + element-plus@2.7.2: + resolution: {integrity: sha512-AdEzBU/A68iUleio0MkQ46JeU5SeQvFFd915GJFScJmUEo5AmYg3OQ4pVjcu+p3b3Nupg9MC5Wa4xjAiC51kUg==} + peerDependencies: + vue: ^3.2.0 + + esbuild-android-64@0.14.54: + resolution: {integrity: sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + + esbuild-android-arm64@0.14.54: + resolution: {integrity: sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + + esbuild-darwin-64@0.14.54: + resolution: {integrity: sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + + esbuild-darwin-arm64@0.14.54: + resolution: {integrity: sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + + esbuild-freebsd-64@0.14.54: + resolution: {integrity: sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + + esbuild-freebsd-arm64@0.14.54: + resolution: {integrity: sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + + esbuild-linux-32@0.14.54: + resolution: {integrity: 
sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + + esbuild-linux-64@0.14.54: + resolution: {integrity: sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + + esbuild-linux-arm64@0.14.54: + resolution: {integrity: sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + + esbuild-linux-arm@0.14.54: + resolution: {integrity: sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + + esbuild-linux-mips64le@0.14.54: + resolution: {integrity: sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + + esbuild-linux-ppc64le@0.14.54: + resolution: {integrity: sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + + esbuild-linux-riscv64@0.14.54: + resolution: {integrity: sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + + esbuild-linux-s390x@0.14.54: + resolution: {integrity: sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + + esbuild-netbsd-64@0.14.54: + resolution: {integrity: sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + + esbuild-openbsd-64@0.14.54: + resolution: {integrity: sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + + esbuild-sunos-64@0.14.54: + resolution: {integrity: sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + + esbuild-windows-32@0.14.54: + resolution: {integrity: sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + + esbuild-windows-64@0.14.54: + resolution: {integrity: sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + + esbuild-windows-arm64@0.14.54: + resolution: {integrity: sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + + esbuild@0.14.54: + resolution: {integrity: sha512-Cy9llcy8DvET5uznocPyqL3BFRrFXSVqbgpMJ9Wz8oVjZlh/zUSNbPRbov0VX7VxN2JH1Oa0uNxZ7eLRb62pJA==} + engines: {node: '>=12'} + hasBin: true + + escape-html@1.0.3: + resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + escape-string-regexp@5.0.0: + resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} + 
engines: {node: '>=12'} + + eslint-config-prettier@8.6.0: + resolution: {integrity: sha512-bAF0eLpLVqP5oEVUFKpMA+NnRFICwn9X8B5jrR9FcqnYBuPbqWEjTEspPWMj5ye6czoSLDweCzSo3Ko7gGrZaA==} + hasBin: true + peerDependencies: + eslint: '>=7.0.0' + + eslint-plugin-prettier@4.2.1: + resolution: {integrity: sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==} + engines: {node: '>=12.0.0'} + peerDependencies: + eslint: '>=7.28.0' + eslint-config-prettier: '*' + prettier: '>=2.0.0' + peerDependenciesMeta: + eslint-config-prettier: + optional: true + + eslint-plugin-vue@9.8.0: + resolution: {integrity: sha512-E/AXwcTzunyzM83C2QqDHxepMzvI2y6x+mmeYHbVDQlKFqmKYvRrhaVixEeeG27uI44p9oKDFiyCRw4XxgtfHA==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.2.0 || ^7.0.0 || ^8.0.0 + + eslint-scope@5.1.1: + resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==} + engines: {node: '>=8.0.0'} + + eslint-scope@7.1.1: + resolution: {integrity: sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-utils@3.0.0: + resolution: {integrity: sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==} + engines: {node: ^10.0.0 || ^12.0.0 || >= 14.0.0} + peerDependencies: + eslint: '>=5' + + eslint-visitor-keys@2.1.0: + resolution: {integrity: sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==} + engines: {node: '>=10'} + + eslint-visitor-keys@3.3.0: + resolution: {integrity: sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint@8.31.0: + resolution: {integrity: sha512-0tQQEVdmPZ1UtUKXjX7EMm9BlgJ08G90IhWh0PKDCb3ZLsgAOHI8fYSIzYVZej92zsgq+ft0FGsxhJ3xo2tbuA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + hasBin: true + + espree@9.4.1: + resolution: {integrity: sha512-XwctdmTO6SIvCzd9810yyNzIrOrqNYV9Koizx4C/mRhf9uq0o4yHoCEU/670pOxOL/MSraektvSAji79kX90Vg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + esquery@1.4.0: + resolution: {integrity: sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@4.3.0: + resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + estree-walker@2.0.2: + resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + execa@5.1.1: + resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} + engines: {node: '>=10'} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + 
fast-diff@1.2.0: + resolution: {integrity: sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==} + + fast-glob@3.2.12: + resolution: {integrity: sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==} + engines: {node: '>=8.6.0'} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fastq@1.15.0: + resolution: {integrity: sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==} + + file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + + fill-range@7.0.1: + resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} + engines: {node: '>=8'} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@3.0.4: + resolution: {integrity: sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==} + engines: {node: ^10.12.0 || >=12.0.0} + + flatted@3.2.7: + resolution: {integrity: sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==} + + follow-redirects@1.15.2: + resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + + form-data@4.0.0: + resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==} + engines: {node: '>= 6'} + + fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.2: + resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.1: + resolution: {integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==} + + get-stream@6.0.1: + resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} + engines: {node: '>=10'} + + glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + + globals@13.19.0: + resolution: {integrity: sha512-dkQ957uSRWHw7CFXLUtUHQI3g3aWApYhfNR2O6jn/907riyTYKVBmxYVROkBcY614FSSeSJh7Xm7SrUWCxvJMQ==} + engines: {node: '>=8'} + + globby@11.1.0: + resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} + engines: {node: '>=10'} + 
+ grapheme-splitter@1.0.4: + resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has@1.0.3: + resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} + engines: {node: '>= 0.4.0'} + + human-signals@2.1.0: + resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} + engines: {node: '>=10.17.0'} + + ignore@5.2.4: + resolution: {integrity: sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==} + engines: {node: '>= 4'} + + immutable@4.2.2: + resolution: {integrity: sha512-fTMKDwtbvO5tldky9QZ2fMX7slR0mYpY5nbnFWYp0fOzDhHqhgIw9KoYgxLWsoNTS9ZHGauHj18DTyEw6BK3Og==} + + import-fresh@3.3.0: + resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + engines: {node: '>=6'} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + + is-core-module@2.11.0: + resolution: {integrity: sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + + is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + js-sdsl@4.2.0: + resolution: {integrity: sha512-dyBIzQBDkCqCu+0upx25Y2jGdbTGxE9fshMsCdK0ViOongpV+n5tXRcZY9v7CaVQ79AGS9KA1KHtojxiM7aXSQ==} + + js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: 
sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + jsonc-parser@3.2.0: + resolution: {integrity: sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==} + + kolorist@1.6.0: + resolution: {integrity: sha512-dLkz37Ab97HWMx9KTes3Tbi3D1ln9fCAy2zr2YVExJasDRPGRaKcoE4fycWNtnCAJfjFqe0cnY+f8KT2JePEXQ==} + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + local-pkg@0.4.2: + resolution: {integrity: sha512-mlERgSPrbxU3BP4qBqAvvwlgW4MTg78iwJdGGnv7kibKjWcJksrG3t6LB5lXI93wXRDvG4NpUgJFmTG4T6rdrg==} + engines: {node: '>=14'} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lodash-es@4.17.21: + resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==} + + lodash-unified@1.0.3: + resolution: {integrity: sha512-WK9qSozxXOD7ZJQlpSqOT+om2ZfcT4yO+03FuzAHD0wF6S0l0090LRPDx3vhTTLZ8cFKpBn+IOcVXK6qOcIlfQ==} + peerDependencies: + '@types/lodash-es': '*' + lodash: '*' + lodash-es: '*' + + lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + + lru-cache@6.0.0: + resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==} + engines: {node: '>=10'} + + magic-string@0.25.9: + resolution: {integrity: sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==} + + magic-string@0.26.7: + resolution: {integrity: sha512-hX9XH3ziStPoPhJxLq1syWuZMxbDvGNbVchfrdCtanC7D13888bMFow61x8axrx+GfHLtVeAx2kxL7tTGRl+Ow==} + engines: {node: '>=12'} + + memoize-one@6.0.0: + resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==} + + merge-stream@2.0.0: + resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} + + merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + micromatch@4.0.5: + resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==} + engines: {node: '>=8.6'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + mimic-fn@2.1.0: + resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} + engines: {node: '>=6'} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@5.1.2: + resolution: {integrity: sha512-bNH9mmM9qsJ2X4r2Nat1B//1dJVcn3+iBLa3IgqJ7EbGaDNepL9QSHOxN4ng33s52VMMhhIfgCYDk3C4ZmlDAg==} + engines: {node: '>=10'} + + mlly@0.5.17: + resolution: {integrity: 
sha512-Rn+ai4G+CQXptDFSRNnChEgNr+xAEauYhwRvpPl/UHStTlgkIftplgJRsA2OXPuoUn86K4XAjB26+x5CEvVb6A==} + + mlly@1.1.0: + resolution: {integrity: sha512-cwzBrBfwGC1gYJyfcy8TcZU1f+dbH/T+TuOhtYP2wLv/Fb51/uV7HJQfBPtEupZ2ORLRU1EKFS/QfS3eo9+kBQ==} + + ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + + nanoid@3.3.4: + resolution: {integrity: sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare-lite@1.4.0: + resolution: {integrity: sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==} + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + normalize-wheel-es@1.2.0: + resolution: {integrity: sha512-Wj7+EJQ8mSuXr2iWfnujrimU35R2W4FAErEyTmJoJ7ucwTn2hOUSsRehMb5RSYkxXGTM7Y9QpvPmp++w5ftoJw==} + + npm-run-path@4.0.1: + resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==} + engines: {node: '>=8'} + + nprogress@0.2.0: + resolution: {integrity: sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==} + + nth-check@2.1.1: + resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + onetime@5.1.2: + resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} + engines: {node: '>=6'} + + optionator@0.9.1: + resolution: {integrity: sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==} + engines: {node: '>= 0.8.0'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + path-type@4.0.0: + resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + + pathe@0.3.9: + resolution: {integrity: 
sha512-6Y6s0vT112P3jD8dGfuS6r+lpa0qqNrLyHPOwvXMnyNTQaYiwgau2DP3aNDsR13xqtGj7rrPo+jFUATpU6/s+g==} + + pathe@1.0.0: + resolution: {integrity: sha512-nPdMG0Pd09HuSsr7QOKUXO2Jr9eqaDiZvDwdyIhNG5SHYujkQHYKDfGQkulBxvbDHz8oHLsTgKN86LSwYzSHAg==} + + picocolors@1.0.0: + resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + pinia@2.0.28: + resolution: {integrity: sha512-YClq9DkqCblq9rlyUual7ezMu/iICWdBtfJrDt4oWU9Zxpijyz7xB2xTwx57DaBQ96UGvvTMORzALr+iO5PVMw==} + peerDependencies: + '@vue/composition-api': ^1.4.0 + typescript: '>=4.4.4' + vue: ^2.6.14 || ^3.2.0 + peerDependenciesMeta: + '@vue/composition-api': + optional: true + typescript: + optional: true + + pkg-types@1.0.1: + resolution: {integrity: sha512-jHv9HB+Ho7dj6ItwppRDDl0iZRYBD0jsakHXtFgoLr+cHSF6xC+QL54sJmWxyGxOLYSHm0afhXhXcQDQqH9z8g==} + + postcss-selector-parser@6.0.11: + resolution: {integrity: sha512-zbARubNdogI9j7WY4nQJBiNqQf3sLS3wCP4WfOidu+p28LofJqDH1tcXypGrcmMHhDk2t9wGhCsYe/+szLTy1g==} + engines: {node: '>=4'} + + postcss@8.4.21: + resolution: {integrity: sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==} + engines: {node: ^10 || ^12 || >=14} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prettier-linter-helpers@1.0.0: + resolution: {integrity: sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==} + engines: {node: '>=6.0.0'} + + prettier@2.8.2: + resolution: {integrity: sha512-BtRV9BcncDyI2tsuS19zzhzoxD8Dh8LiCx7j7tHzrkz8GFXAexeWFdi22mjE1d16dftH2qNaytVxqiRTGlMfpw==} + engines: {node: '>=10.13.0'} + hasBin: true + + punycode@2.1.1: + resolution: {integrity: sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==} + engines: {node: '>=6'} + + queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + + regexpp@3.2.0: + resolution: {integrity: sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==} + engines: {node: '>=8'} + + resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + resolve@1.22.1: + resolution: {integrity: sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==} + hasBin: true + + reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + hasBin: true + + rollup@2.77.3: + resolution: {integrity: sha512-/qxNTG7FbmefJWoeeYJFbHehJ2HNWnjkAFRKzWN/45eNBBF/r8lo992CwcJXEzyVxs5FmfId+vTSTQDb+bxA+g==} + engines: {node: '>=10.0.0'} + hasBin: true + + rollup@2.79.1: + resolution: {integrity: 
sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==} + engines: {node: '>=10.0.0'} + hasBin: true + + rotate-canvas@1.0.0: + resolution: {integrity: sha512-ZLWB9QbGotvlKuTWuNtlMVQHDWB59UUY3xnzJo0jO+ovbymEyucPUTxKg+a5aYRwkDHS/TWvcZHj/eVYwyTswg==} + + run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + + sass@1.57.1: + resolution: {integrity: sha512-O2+LwLS79op7GI0xZ8fqzF7X2m/m8WFfI02dHOdsK5R2ECeS5F62zrwg/relM1rjSLy7Vd/DiMNIvPrQGsA0jw==} + engines: {node: '>=12.0.0'} + hasBin: true + + scule@0.2.1: + resolution: {integrity: sha512-M9gnWtn3J0W+UhJOHmBxBTwv8mZCan5i1Himp60t6vvZcor0wr+IM0URKmIglsWJ7bRujNAVVN77fp+uZaWoKg==} + + semver@7.3.8: + resolution: {integrity: sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==} + engines: {node: '>=10'} + hasBin: true + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + + simple-vue-camera@1.1.3: + resolution: {integrity: sha512-GVAYq1BMI9cHt+h24tu2dfIFFvhjVQ1M8IkK5LmrKcYoBA8FZlLNlhrHC2NnTPbMAXIvJn1Bqx8X6Q31+Y2+jA==} + + slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + + source-map-js@1.0.2: + resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==} + engines: {node: '>=0.10.0'} + + source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + + sourcemap-codec@1.4.8: + resolution: {integrity: sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==} + deprecated: Please use @jridgewell/sourcemap-codec instead + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-final-newline@2.0.0: + resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==} + engines: {node: '>=6'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + strip-literal@0.4.2: + resolution: {integrity: sha512-pv48ybn4iE1O9RLgCAN0iU4Xv7RlBTiit6DKmMiErbs9x1wH6vXBs45tWc0H5wUIF6TLTrKweqkmYF/iraQKNw==} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + + to-fast-properties@2.0.0: + resolution: 
{integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} + engines: {node: '>=4'} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + tslib@1.14.1: + resolution: {integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==} + + tsutils@3.21.0: + resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==} + engines: {node: '>= 6'} + peerDependencies: + typescript: '>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta' + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + + typescript@4.9.4: + resolution: {integrity: sha512-Uz+dTXYzxXXbsFpM86Wh3dKCxrQqUcVMxwU54orwlJjOpO3ao8L7j5lH+dWfTwgCwIuM9GQ2kvVotzYJMXTBZg==} + engines: {node: '>=4.2.0'} + hasBin: true + + ufo@1.0.1: + resolution: {integrity: sha512-boAm74ubXHY7KJQZLlXrtMz52qFvpsbOxDcZOnw/Wf+LS4Mmyu7JxmzD4tDLtUQtmZECypJ0FrCz4QIe6dvKRA==} + + unimport@0.4.7: + resolution: {integrity: sha512-V2Pbscd1VSdgWm1/OI2pjtydEOTjE7DDnHZKhpOq7bSUBc1i8+1f6PK8jI1lJ1plRDcSNr0DLtAmtU9NPkFQpw==} + + unplugin-auto-import@0.9.5: + resolution: {integrity: sha512-CskZjMM+p/QZev7y4JgaAFrf63ui4VGS4HrDMm6VIiVjwnmQ0wPugo58GGhYa+W2Hyv6zGffYO6uYHfeVlDZDA==} + engines: {node: '>=14'} + peerDependencies: + '@vueuse/core': '*' + peerDependenciesMeta: + '@vueuse/core': + optional: true + + unplugin-icons@0.14.15: + resolution: {integrity: sha512-J6YBA+fUzVM2IZPXCK3Pnk36jYVwQ6lkjRgOnZaXNIxpMDsmwDqrE1AGJ0zUbfuEoOa90OBGc0OPfN1r+qlSIQ==} + peerDependencies: + '@svgr/core': '>=5.5.0' + '@vue/compiler-sfc': ^3.0.2 + vue-template-compiler: ^2.6.12 + vue-template-es2015-compiler: ^1.9.0 + peerDependenciesMeta: + '@svgr/core': + optional: true + '@vue/compiler-sfc': + optional: true + vue-template-compiler: + optional: true + vue-template-es2015-compiler: + optional: true + + unplugin-vue-components@0.21.2: + resolution: {integrity: sha512-HBU+EuesDj/HRs7EtYH7gBACljVhqLylltrCLModRmCToIIrrNvMh54aylUt4AD4qiwylgOx4Vgb9sBlrIcRDw==} + engines: {node: '>=14'} + peerDependencies: + '@babel/parser': ^7.15.8 + vue: 2 || 3 + peerDependenciesMeta: + '@babel/parser': + optional: true + + unplugin@0.7.2: + resolution: {integrity: sha512-m7thX4jP8l5sETpLdUASoDOGOcHaOVtgNyrYlToyQUvILUtEzEnngRBrHnAX3IKqooJVmXpoa/CwQ/QqzvGaHQ==} + peerDependencies: + esbuild: '>=0.13' + rollup: ^2.50.0 + vite: ^2.3.0 || ^3.0.0-0 + webpack: 4 || 5 + peerDependenciesMeta: + esbuild: + optional: true + rollup: + optional: true + vite: + optional: true + webpack: + optional: true + + unplugin@1.0.1: + resolution: {integrity: sha512-aqrHaVBWW1JVKBHmGo33T5TxeL0qWzfvjWokObHA9bYmN7eNDkwOxmLjhioHl9878qDFMAaT51XNroRyuz7WxA==} + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + vite-plugin-eslint@1.8.1: + resolution: {integrity: 
sha512-PqdMf3Y2fLO9FsNPmMX+//2BF5SF8nEWspZdgl4kSt7UvHDRHVVfHvxsD7ULYzZrJDGRxR81Nq7TOFgwMnUang==} + peerDependencies: + eslint: '>=7' + vite: '>=2' + + vite@2.9.15: + resolution: {integrity: sha512-fzMt2jK4vQ3yK56te3Kqpkaeq9DkcZfBbzHwYpobasvgYmP2SoAr6Aic05CsB4CzCZbsDv4sujX3pkEGhLabVQ==} + engines: {node: '>=12.2.0'} + hasBin: true + peerDependencies: + less: '*' + sass: '*' + stylus: '*' + peerDependenciesMeta: + less: + optional: true + sass: + optional: true + stylus: + optional: true + + vue-camera-lib@1.0.4: + resolution: {integrity: sha512-LlMrOqb8j/bOrgWYW1UQKnDFQLJLmHM5d8QyTjwEoArw9f4jsbz7TXa0btmZkTLToVQYNgdGykj5OG57s0pqZA==} + + vue-demi@0.13.11: + resolution: {integrity: sha512-IR8HoEEGM65YY3ZJYAjMlKygDQn25D5ajNFNoKh9RSDMQtlzCxtfQjdQgv9jjK+m3377SsJXY8ysq8kLCZL25A==} + engines: {node: '>=12'} + hasBin: true + peerDependencies: + '@vue/composition-api': ^1.0.0-rc.1 + vue: ^3.0.0-0 || ^2.6.0 + peerDependenciesMeta: + '@vue/composition-api': + optional: true + + vue-eslint-parser@9.1.0: + resolution: {integrity: sha512-NGn/iQy8/Wb7RrRa4aRkokyCZfOUWk19OP5HP6JEozQFX5AoS/t+Z0ZN7FY4LlmWc4FNI922V7cvX28zctN8dQ==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: '>=6.0.0' + + vue-i18n@9.2.2: + resolution: {integrity: sha512-yswpwtj89rTBhegUAv9Mu37LNznyu3NpyLQmozF3i1hYOhwpG8RjcjIFIIfnu+2MDZJGSZPXaKWvnQA71Yv9TQ==} + engines: {node: '>= 14'} + peerDependencies: + vue: ^3.0.0 + + vue-router@4.1.6: + resolution: {integrity: sha512-DYWYwsG6xNPmLq/FmZn8Ip+qrhFEzA14EI12MsMgVxvHFDYvlr4NXpVF5hrRH1wVcDP8fGi5F4rxuJSl8/r+EQ==} + peerDependencies: + vue: ^3.2.0 + + vue-tsc@0.34.17: + resolution: {integrity: sha512-jzUXky44ZLHC4daaJag7FQr3idlPYN719/K1eObGljz5KaS2UnVGTU/XSYCd7d6ampYYg4OsyalbHyJIxV0aEQ==} + hasBin: true + peerDependencies: + typescript: '*' + + vue@3.2.45: + resolution: {integrity: sha512-9Nx/Mg2b2xWlXykmCwiTUCWHbWIj53bnkizBxKai1g61f2Xit700A1ljowpTIM11e3uipOeiPcSqnmBg6gyiaA==} + + webpack-sources@3.2.3: + resolution: {integrity: sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==} + engines: {node: '>=10.13.0'} + + webpack-virtual-modules@0.4.6: + resolution: {integrity: sha512-5tyDlKLqPfMqjT3Q9TAqf2YqjwmnUleZwzJi1A5qXnlBCdj2AtOJ6wAWdglTIDOPgOiOrXeBeFcsQ8+aGQ6QbA==} + + webpack-virtual-modules@0.5.0: + resolution: {integrity: sha512-kyDivFZ7ZM0BVOUteVbDFhlRt7Ah/CSPwJdi8hBpkK7QLumUqdLtVfm/PX/hkcnrvr0i77fO5+TjZ94Pe+C9iw==} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + word-wrap@1.2.3: + resolution: {integrity: sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==} + engines: {node: '>=0.10.0'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + xml-name-validator@4.0.0: + resolution: {integrity: sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==} + engines: {node: '>=12'} + + yallist@4.0.0: + resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + +snapshots: + + '@antfu/install-pkg@0.1.1': + dependencies: + execa: 5.1.1 + find-up: 5.0.0 + + '@antfu/utils@0.5.2': {} + + 
'@antfu/utils@0.7.2': {} + + '@babel/helper-string-parser@7.24.1': {} + + '@babel/helper-validator-identifier@7.22.20': {} + + '@babel/parser@7.20.7': + dependencies: + '@babel/types': 7.24.0 + + '@babel/types@7.24.0': + dependencies: + '@babel/helper-string-parser': 7.24.1 + '@babel/helper-validator-identifier': 7.22.20 + to-fast-properties: 2.0.0 + + '@ctrl/tinycolor@3.6.1': {} + + '@element-plus/icons-vue@2.3.1(vue@3.2.45)': + dependencies: + vue: 3.2.45 + + '@esbuild/linux-loong64@0.14.54': + optional: true + + '@eslint/eslintrc@1.4.1': + dependencies: + ajv: 6.12.6 + debug: 4.3.4 + espree: 9.4.1 + globals: 13.19.0 + ignore: 5.2.4 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@floating-ui/core@1.6.0': + dependencies: + '@floating-ui/utils': 0.2.1 + + '@floating-ui/dom@1.6.3': + dependencies: + '@floating-ui/core': 1.6.0 + '@floating-ui/utils': 0.2.1 + + '@floating-ui/utils@0.2.1': {} + + '@humanwhocodes/config-array@0.11.8': + dependencies: + '@humanwhocodes/object-schema': 1.2.1 + debug: 4.3.4 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/object-schema@1.2.1': {} + + '@iconify-json/ep@1.1.8': + dependencies: + '@iconify/types': 2.0.0 + + '@iconify/types@2.0.0': {} + + '@iconify/utils@2.0.11': + dependencies: + '@antfu/install-pkg': 0.1.1 + '@antfu/utils': 0.7.2 + '@iconify/types': 2.0.0 + debug: 4.3.4 + kolorist: 1.6.0 + local-pkg: 0.4.2 + transitivePeerDependencies: + - supports-color + + '@intlify/core-base@9.2.2': + dependencies: + '@intlify/devtools-if': 9.2.2 + '@intlify/message-compiler': 9.2.2 + '@intlify/shared': 9.2.2 + '@intlify/vue-devtools': 9.2.2 + + '@intlify/devtools-if@9.2.2': + dependencies: + '@intlify/shared': 9.2.2 + + '@intlify/message-compiler@9.2.2': + dependencies: + '@intlify/shared': 9.2.2 + source-map: 0.6.1 + + '@intlify/shared@9.2.2': {} + + '@intlify/vue-devtools@9.2.2': + dependencies: + '@intlify/core-base': 9.2.2 + '@intlify/shared': 9.2.2 + + '@nodelib/fs.scandir@2.1.5': + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + '@nodelib/fs.stat@2.0.5': {} + + '@nodelib/fs.walk@1.2.8': + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.15.0 + + '@rollup/pluginutils@4.2.1': + dependencies: + estree-walker: 2.0.2 + picomatch: 2.3.1 + + '@sxzz/popperjs-es@2.11.7': {} + + '@types/eslint@8.4.10': + dependencies: + '@types/estree': 1.0.0 + '@types/json-schema': 7.0.11 + + '@types/estree@1.0.0': {} + + '@types/json-schema@7.0.11': {} + + '@types/lodash-es@4.17.12': + dependencies: + '@types/lodash': 4.17.0 + + '@types/lodash@4.17.0': {} + + '@types/node@18.11.18': {} + + '@types/nprogress@0.2.0': {} + + '@types/semver@7.3.13': {} + + '@types/web-bluetooth@0.0.14': {} + + '@types/web-bluetooth@0.0.16': {} + + '@typescript-eslint/eslint-plugin@5.48.1(@typescript-eslint/parser@5.48.1(eslint@8.31.0)(typescript@4.9.4))(eslint@8.31.0)(typescript@4.9.4)': + dependencies: + '@typescript-eslint/parser': 5.48.1(eslint@8.31.0)(typescript@4.9.4) + '@typescript-eslint/scope-manager': 5.48.1 + '@typescript-eslint/type-utils': 5.48.1(eslint@8.31.0)(typescript@4.9.4) + '@typescript-eslint/utils': 5.48.1(eslint@8.31.0)(typescript@4.9.4) + debug: 4.3.4 + eslint: 8.31.0 + ignore: 5.2.4 + natural-compare-lite: 1.4.0 + regexpp: 3.2.0 + semver: 7.3.8 + tsutils: 3.21.0(typescript@4.9.4) + optionalDependencies: + typescript: 4.9.4 + transitivePeerDependencies: + - supports-color + + 
'@typescript-eslint/parser@5.48.1(eslint@8.31.0)(typescript@4.9.4)': + dependencies: + '@typescript-eslint/scope-manager': 5.48.1 + '@typescript-eslint/types': 5.48.1 + '@typescript-eslint/typescript-estree': 5.48.1(typescript@4.9.4) + debug: 4.3.4 + eslint: 8.31.0 + optionalDependencies: + typescript: 4.9.4 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@5.48.1': + dependencies: + '@typescript-eslint/types': 5.48.1 + '@typescript-eslint/visitor-keys': 5.48.1 + + '@typescript-eslint/type-utils@5.48.1(eslint@8.31.0)(typescript@4.9.4)': + dependencies: + '@typescript-eslint/typescript-estree': 5.48.1(typescript@4.9.4) + '@typescript-eslint/utils': 5.48.1(eslint@8.31.0)(typescript@4.9.4) + debug: 4.3.4 + eslint: 8.31.0 + tsutils: 3.21.0(typescript@4.9.4) + optionalDependencies: + typescript: 4.9.4 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@5.48.1': {} + + '@typescript-eslint/typescript-estree@5.48.1(typescript@4.9.4)': + dependencies: + '@typescript-eslint/types': 5.48.1 + '@typescript-eslint/visitor-keys': 5.48.1 + debug: 4.3.4 + globby: 11.1.0 + is-glob: 4.0.3 + semver: 7.3.8 + tsutils: 3.21.0(typescript@4.9.4) + optionalDependencies: + typescript: 4.9.4 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@5.48.1(eslint@8.31.0)(typescript@4.9.4)': + dependencies: + '@types/json-schema': 7.0.11 + '@types/semver': 7.3.13 + '@typescript-eslint/scope-manager': 5.48.1 + '@typescript-eslint/types': 5.48.1 + '@typescript-eslint/typescript-estree': 5.48.1(typescript@4.9.4) + eslint: 8.31.0 + eslint-scope: 5.1.1 + eslint-utils: 3.0.0(eslint@8.31.0) + semver: 7.3.8 + transitivePeerDependencies: + - supports-color + - typescript + + '@typescript-eslint/visitor-keys@5.48.1': + dependencies: + '@typescript-eslint/types': 5.48.1 + eslint-visitor-keys: 3.3.0 + + '@vitejs/plugin-vue@2.3.4(vite@2.9.15(sass@1.57.1))(vue@3.2.45)': + dependencies: + vite: 2.9.15(sass@1.57.1) + vue: 3.2.45 + + '@volar/code-gen@0.34.17': + dependencies: + '@volar/source-map': 0.34.17 + + '@volar/source-map@0.34.17': {} + + '@volar/vue-code-gen@0.34.17': + dependencies: + '@volar/code-gen': 0.34.17 + '@volar/source-map': 0.34.17 + '@vue/compiler-core': 3.2.45 + '@vue/compiler-dom': 3.2.45 + '@vue/shared': 3.2.45 + + '@volar/vue-typescript@0.34.17': + dependencies: + '@volar/code-gen': 0.34.17 + '@volar/source-map': 0.34.17 + '@volar/vue-code-gen': 0.34.17 + '@vue/compiler-sfc': 3.2.45 + '@vue/reactivity': 3.2.45 + + '@vue/compiler-core@3.2.45': + dependencies: + '@babel/parser': 7.20.7 + '@vue/shared': 3.2.45 + estree-walker: 2.0.2 + source-map: 0.6.1 + + '@vue/compiler-dom@3.2.45': + dependencies: + '@vue/compiler-core': 3.2.45 + '@vue/shared': 3.2.45 + + '@vue/compiler-sfc@3.2.45': + dependencies: + '@babel/parser': 7.20.7 + '@vue/compiler-core': 3.2.45 + '@vue/compiler-dom': 3.2.45 + '@vue/compiler-ssr': 3.2.45 + '@vue/reactivity-transform': 3.2.45 + '@vue/shared': 3.2.45 + estree-walker: 2.0.2 + magic-string: 0.25.9 + postcss: 8.4.21 + source-map: 0.6.1 + + '@vue/compiler-ssr@3.2.45': + dependencies: + '@vue/compiler-dom': 3.2.45 + '@vue/shared': 3.2.45 + + '@vue/devtools-api@6.4.5': {} + + '@vue/reactivity-transform@3.2.45': + dependencies: + '@babel/parser': 7.20.7 + '@vue/compiler-core': 3.2.45 + '@vue/shared': 3.2.45 + estree-walker: 2.0.2 + magic-string: 0.25.9 + + '@vue/reactivity@3.2.45': + dependencies: + '@vue/shared': 3.2.45 + + '@vue/runtime-core@3.2.45': + dependencies: + '@vue/reactivity': 3.2.45 + '@vue/shared': 
3.2.45 + + '@vue/runtime-dom@3.2.45': + dependencies: + '@vue/runtime-core': 3.2.45 + '@vue/shared': 3.2.45 + csstype: 2.6.21 + + '@vue/server-renderer@3.2.45(vue@3.2.45)': + dependencies: + '@vue/compiler-ssr': 3.2.45 + '@vue/shared': 3.2.45 + vue: 3.2.45 + + '@vue/shared@3.2.45': {} + + '@vueuse/core@8.9.4(vue@3.2.45)': + dependencies: + '@types/web-bluetooth': 0.0.14 + '@vueuse/metadata': 8.9.4 + '@vueuse/shared': 8.9.4(vue@3.2.45) + vue-demi: 0.13.11(vue@3.2.45) + optionalDependencies: + vue: 3.2.45 + + '@vueuse/core@9.13.0(vue@3.2.45)': + dependencies: + '@types/web-bluetooth': 0.0.16 + '@vueuse/metadata': 9.13.0 + '@vueuse/shared': 9.13.0(vue@3.2.45) + vue-demi: 0.13.11(vue@3.2.45) + transitivePeerDependencies: + - '@vue/composition-api' + - vue + + '@vueuse/metadata@8.9.4': {} + + '@vueuse/metadata@9.13.0': {} + + '@vueuse/shared@8.9.4(vue@3.2.45)': + dependencies: + vue-demi: 0.13.11(vue@3.2.45) + optionalDependencies: + vue: 3.2.45 + + '@vueuse/shared@9.13.0(vue@3.2.45)': + dependencies: + vue-demi: 0.13.11(vue@3.2.45) + transitivePeerDependencies: + - '@vue/composition-api' + - vue + + acorn-jsx@5.3.2(acorn@8.8.1): + dependencies: + acorn: 8.8.1 + + acorn@8.8.1: {} + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-regex@5.0.1: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + anymatch@3.1.3: + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + argparse@2.0.1: {} + + array-union@2.1.0: {} + + async-validator@4.2.5: {} + + asynckit@0.4.0: {} + + axios@0.27.2: + dependencies: + follow-redirects: 1.15.2 + form-data: 4.0.0 + transitivePeerDependencies: + - debug + + balanced-match@1.0.2: {} + + binary-extensions@2.2.0: {} + + boolbase@1.0.0: {} + + brace-expansion@1.1.11: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.1: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.2: + dependencies: + fill-range: 7.0.1 + + callsites@3.1.0: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + chokidar@3.5.3: + dependencies: + anymatch: 3.1.3 + braces: 3.0.2 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.2 + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + concat-map@0.0.1: {} + + consola@2.15.3: {} + + cross-spawn@7.0.3: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + cssesc@3.0.0: {} + + csstype@2.6.21: {} + + dayjs@1.11.10: {} + + debug@4.3.4: + dependencies: + ms: 2.1.2 + + deep-is@0.1.4: {} + + delayed-stream@1.0.0: {} + + deviceorientation-js@1.0.0: {} + + dir-glob@3.0.1: + dependencies: + path-type: 4.0.0 + + doctrine@3.0.0: + dependencies: + esutils: 2.0.3 + + element-plus@2.7.2(vue@3.2.45): + dependencies: + '@ctrl/tinycolor': 3.6.1 + '@element-plus/icons-vue': 2.3.1(vue@3.2.45) + '@floating-ui/dom': 1.6.3 + '@popperjs/core': '@sxzz/popperjs-es@2.11.7' + '@types/lodash': 4.17.0 + '@types/lodash-es': 4.17.12 + '@vueuse/core': 9.13.0(vue@3.2.45) + async-validator: 4.2.5 + dayjs: 1.11.10 + escape-html: 1.0.3 + lodash: 4.17.21 + lodash-es: 4.17.21 + lodash-unified: 1.0.3(@types/lodash-es@4.17.12)(lodash-es@4.17.21)(lodash@4.17.21) + memoize-one: 6.0.0 + normalize-wheel-es: 1.2.0 + vue: 3.2.45 + transitivePeerDependencies: + - '@vue/composition-api' + + 
esbuild-android-64@0.14.54: + optional: true + + esbuild-android-arm64@0.14.54: + optional: true + + esbuild-darwin-64@0.14.54: + optional: true + + esbuild-darwin-arm64@0.14.54: + optional: true + + esbuild-freebsd-64@0.14.54: + optional: true + + esbuild-freebsd-arm64@0.14.54: + optional: true + + esbuild-linux-32@0.14.54: + optional: true + + esbuild-linux-64@0.14.54: + optional: true + + esbuild-linux-arm64@0.14.54: + optional: true + + esbuild-linux-arm@0.14.54: + optional: true + + esbuild-linux-mips64le@0.14.54: + optional: true + + esbuild-linux-ppc64le@0.14.54: + optional: true + + esbuild-linux-riscv64@0.14.54: + optional: true + + esbuild-linux-s390x@0.14.54: + optional: true + + esbuild-netbsd-64@0.14.54: + optional: true + + esbuild-openbsd-64@0.14.54: + optional: true + + esbuild-sunos-64@0.14.54: + optional: true + + esbuild-windows-32@0.14.54: + optional: true + + esbuild-windows-64@0.14.54: + optional: true + + esbuild-windows-arm64@0.14.54: + optional: true + + esbuild@0.14.54: + optionalDependencies: + '@esbuild/linux-loong64': 0.14.54 + esbuild-android-64: 0.14.54 + esbuild-android-arm64: 0.14.54 + esbuild-darwin-64: 0.14.54 + esbuild-darwin-arm64: 0.14.54 + esbuild-freebsd-64: 0.14.54 + esbuild-freebsd-arm64: 0.14.54 + esbuild-linux-32: 0.14.54 + esbuild-linux-64: 0.14.54 + esbuild-linux-arm: 0.14.54 + esbuild-linux-arm64: 0.14.54 + esbuild-linux-mips64le: 0.14.54 + esbuild-linux-ppc64le: 0.14.54 + esbuild-linux-riscv64: 0.14.54 + esbuild-linux-s390x: 0.14.54 + esbuild-netbsd-64: 0.14.54 + esbuild-openbsd-64: 0.14.54 + esbuild-sunos-64: 0.14.54 + esbuild-windows-32: 0.14.54 + esbuild-windows-64: 0.14.54 + esbuild-windows-arm64: 0.14.54 + + escape-html@1.0.3: {} + + escape-string-regexp@4.0.0: {} + + escape-string-regexp@5.0.0: {} + + eslint-config-prettier@8.6.0(eslint@8.31.0): + dependencies: + eslint: 8.31.0 + + eslint-plugin-prettier@4.2.1(eslint-config-prettier@8.6.0(eslint@8.31.0))(eslint@8.31.0)(prettier@2.8.2): + dependencies: + eslint: 8.31.0 + prettier: 2.8.2 + prettier-linter-helpers: 1.0.0 + optionalDependencies: + eslint-config-prettier: 8.6.0(eslint@8.31.0) + + eslint-plugin-vue@9.8.0(eslint@8.31.0): + dependencies: + eslint: 8.31.0 + eslint-utils: 3.0.0(eslint@8.31.0) + natural-compare: 1.4.0 + nth-check: 2.1.1 + postcss-selector-parser: 6.0.11 + semver: 7.3.8 + vue-eslint-parser: 9.1.0(eslint@8.31.0) + xml-name-validator: 4.0.0 + transitivePeerDependencies: + - supports-color + + eslint-scope@5.1.1: + dependencies: + esrecurse: 4.3.0 + estraverse: 4.3.0 + + eslint-scope@7.1.1: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-utils@3.0.0(eslint@8.31.0): + dependencies: + eslint: 8.31.0 + eslint-visitor-keys: 2.1.0 + + eslint-visitor-keys@2.1.0: {} + + eslint-visitor-keys@3.3.0: {} + + eslint@8.31.0: + dependencies: + '@eslint/eslintrc': 1.4.1 + '@humanwhocodes/config-array': 0.11.8 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.3 + debug: 4.3.4 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.1.1 + eslint-utils: 3.0.0(eslint@8.31.0) + eslint-visitor-keys: 3.3.0 + espree: 9.4.1 + esquery: 1.4.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.19.0 + grapheme-splitter: 1.0.4 + ignore: 5.2.4 + import-fresh: 3.3.0 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-sdsl: 4.2.0 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 
4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.1 + regexpp: 3.2.0 + strip-ansi: 6.0.1 + strip-json-comments: 3.1.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + + espree@9.4.1: + dependencies: + acorn: 8.8.1 + acorn-jsx: 5.3.2(acorn@8.8.1) + eslint-visitor-keys: 3.3.0 + + esquery@1.4.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@4.3.0: {} + + estraverse@5.3.0: {} + + estree-walker@2.0.2: {} + + esutils@2.0.3: {} + + execa@5.1.1: + dependencies: + cross-spawn: 7.0.3 + get-stream: 6.0.1 + human-signals: 2.1.0 + is-stream: 2.0.1 + merge-stream: 2.0.0 + npm-run-path: 4.0.1 + onetime: 5.1.2 + signal-exit: 3.0.7 + strip-final-newline: 2.0.0 + + fast-deep-equal@3.1.3: {} + + fast-diff@1.2.0: {} + + fast-glob@3.2.12: + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.5 + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fastq@1.15.0: + dependencies: + reusify: 1.0.4 + + file-entry-cache@6.0.1: + dependencies: + flat-cache: 3.0.4 + + fill-range@7.0.1: + dependencies: + to-regex-range: 5.0.1 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@3.0.4: + dependencies: + flatted: 3.2.7 + rimraf: 3.0.2 + + flatted@3.2.7: {} + + follow-redirects@1.15.2: {} + + form-data@4.0.0: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + mime-types: 2.1.35 + + fs.realpath@1.0.0: {} + + fsevents@2.3.2: + optional: true + + function-bind@1.1.1: {} + + get-stream@6.0.1: {} + + glob-parent@5.1.2: + dependencies: + is-glob: 4.0.3 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + globals@13.19.0: + dependencies: + type-fest: 0.20.2 + + globby@11.1.0: + dependencies: + array-union: 2.1.0 + dir-glob: 3.0.1 + fast-glob: 3.2.12 + ignore: 5.2.4 + merge2: 1.4.1 + slash: 3.0.0 + + grapheme-splitter@1.0.4: {} + + has-flag@4.0.0: {} + + has@1.0.3: + dependencies: + function-bind: 1.1.1 + + human-signals@2.1.0: {} + + ignore@5.2.4: {} + + immutable@4.2.2: {} + + import-fresh@3.3.0: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + imurmurhash@0.1.4: {} + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + is-binary-path@2.1.0: + dependencies: + binary-extensions: 2.2.0 + + is-core-module@2.11.0: + dependencies: + has: 1.0.3 + + is-extglob@2.1.1: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + is-number@7.0.0: {} + + is-path-inside@3.0.3: {} + + is-stream@2.0.1: {} + + isexe@2.0.0: {} + + js-sdsl@4.2.0: {} + + js-yaml@4.1.0: + dependencies: + argparse: 2.0.1 + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + jsonc-parser@3.2.0: {} + + kolorist@1.6.0: {} + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + local-pkg@0.4.2: {} + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash-es@4.17.21: {} + + lodash-unified@1.0.3(@types/lodash-es@4.17.12)(lodash-es@4.17.21)(lodash@4.17.21): + dependencies: + '@types/lodash-es': 4.17.12 + lodash: 4.17.21 + lodash-es: 4.17.21 + + lodash.merge@4.6.2: {} + + lodash@4.17.21: {} + + lru-cache@6.0.0: + dependencies: + yallist: 4.0.0 + + magic-string@0.25.9: + dependencies: + sourcemap-codec: 1.4.8 + + magic-string@0.26.7: + dependencies: + 
sourcemap-codec: 1.4.8 + + memoize-one@6.0.0: {} + + merge-stream@2.0.0: {} + + merge2@1.4.1: {} + + micromatch@4.0.5: + dependencies: + braces: 3.0.2 + picomatch: 2.3.1 + + mime-db@1.52.0: {} + + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + + mimic-fn@2.1.0: {} + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.11 + + minimatch@5.1.2: + dependencies: + brace-expansion: 2.0.1 + + mlly@0.5.17: + dependencies: + acorn: 8.8.1 + pathe: 1.0.0 + pkg-types: 1.0.1 + ufo: 1.0.1 + + mlly@1.1.0: + dependencies: + acorn: 8.8.1 + pathe: 1.0.0 + pkg-types: 1.0.1 + ufo: 1.0.1 + + ms@2.1.2: {} + + nanoid@3.3.4: {} + + natural-compare-lite@1.4.0: {} + + natural-compare@1.4.0: {} + + normalize-path@3.0.0: {} + + normalize-wheel-es@1.2.0: {} + + npm-run-path@4.0.1: + dependencies: + path-key: 3.1.1 + + nprogress@0.2.0: {} + + nth-check@2.1.1: + dependencies: + boolbase: 1.0.0 + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + onetime@5.1.2: + dependencies: + mimic-fn: 2.1.0 + + optionator@0.9.1: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.3 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + path-exists@4.0.0: {} + + path-is-absolute@1.0.1: {} + + path-key@3.1.1: {} + + path-parse@1.0.7: {} + + path-type@4.0.0: {} + + pathe@0.3.9: {} + + pathe@1.0.0: {} + + picocolors@1.0.0: {} + + picomatch@2.3.1: {} + + pinia@2.0.28(typescript@4.9.4)(vue@3.2.45): + dependencies: + '@vue/devtools-api': 6.4.5 + vue: 3.2.45 + vue-demi: 0.13.11(vue@3.2.45) + optionalDependencies: + typescript: 4.9.4 + + pkg-types@1.0.1: + dependencies: + jsonc-parser: 3.2.0 + mlly: 1.1.0 + pathe: 1.0.0 + + postcss-selector-parser@6.0.11: + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + postcss@8.4.21: + dependencies: + nanoid: 3.3.4 + picocolors: 1.0.0 + source-map-js: 1.0.2 + + prelude-ls@1.2.1: {} + + prettier-linter-helpers@1.0.0: + dependencies: + fast-diff: 1.2.0 + + prettier@2.8.2: {} + + punycode@2.1.1: {} + + queue-microtask@1.2.3: {} + + readdirp@3.6.0: + dependencies: + picomatch: 2.3.1 + + regexpp@3.2.0: {} + + resolve-from@4.0.0: {} + + resolve@1.22.1: + dependencies: + is-core-module: 2.11.0 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + reusify@1.0.4: {} + + rimraf@3.0.2: + dependencies: + glob: 7.2.3 + + rollup@2.77.3: + optionalDependencies: + fsevents: 2.3.2 + + rollup@2.79.1: + optionalDependencies: + fsevents: 2.3.2 + + rotate-canvas@1.0.0: + dependencies: + typescript: 4.9.4 + + run-parallel@1.2.0: + dependencies: + queue-microtask: 1.2.3 + + sass@1.57.1: + dependencies: + chokidar: 3.5.3 + immutable: 4.2.2 + source-map-js: 1.0.2 + + scule@0.2.1: {} + + semver@7.3.8: + dependencies: + lru-cache: 6.0.0 + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + signal-exit@3.0.7: {} + + simple-vue-camera@1.1.3: {} + + slash@3.0.0: {} + + source-map-js@1.0.2: {} + + source-map@0.6.1: {} + + sourcemap-codec@1.4.8: {} + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-final-newline@2.0.0: {} + + strip-json-comments@3.1.1: {} + + strip-literal@0.4.2: + dependencies: + acorn: 8.8.1 + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + supports-preserve-symlinks-flag@1.0.0: {} + + text-table@0.2.0: {} + + to-fast-properties@2.0.0: {} + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + tslib@1.14.1: {} + 
+ tsutils@3.21.0(typescript@4.9.4): + dependencies: + tslib: 1.14.1 + typescript: 4.9.4 + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + type-fest@0.20.2: {} + + typescript@4.9.4: {} + + ufo@1.0.1: {} + + unimport@0.4.7(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1)): + dependencies: + '@rollup/pluginutils': 4.2.1 + escape-string-regexp: 5.0.0 + fast-glob: 3.2.12 + local-pkg: 0.4.2 + magic-string: 0.26.7 + mlly: 0.5.17 + pathe: 0.3.9 + scule: 0.2.1 + strip-literal: 0.4.2 + unplugin: 0.7.2(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1)) + transitivePeerDependencies: + - esbuild + - rollup + - vite + - webpack + + unplugin-auto-import@0.9.5(@vueuse/core@8.9.4(vue@3.2.45))(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1)): + dependencies: + '@antfu/utils': 0.5.2 + '@rollup/pluginutils': 4.2.1 + local-pkg: 0.4.2 + magic-string: 0.26.7 + unimport: 0.4.7(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1)) + unplugin: 0.7.2(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1)) + optionalDependencies: + '@vueuse/core': 8.9.4(vue@3.2.45) + transitivePeerDependencies: + - esbuild + - rollup + - vite + - webpack + + unplugin-icons@0.14.15(@vue/compiler-sfc@3.2.45): + dependencies: + '@antfu/install-pkg': 0.1.1 + '@antfu/utils': 0.7.2 + '@iconify/utils': 2.0.11 + debug: 4.3.4 + kolorist: 1.6.0 + local-pkg: 0.4.2 + unplugin: 1.0.1 + optionalDependencies: + '@vue/compiler-sfc': 3.2.45 + transitivePeerDependencies: + - supports-color + + unplugin-vue-components@0.21.2(@babel/parser@7.20.7)(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1))(vue@3.2.45): + dependencies: + '@antfu/utils': 0.5.2 + '@rollup/pluginutils': 4.2.1 + chokidar: 3.5.3 + debug: 4.3.4 + fast-glob: 3.2.12 + local-pkg: 0.4.2 + magic-string: 0.26.7 + minimatch: 5.1.2 + resolve: 1.22.1 + unplugin: 0.7.2(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1)) + vue: 3.2.45 + optionalDependencies: + '@babel/parser': 7.20.7 + transitivePeerDependencies: + - esbuild + - rollup + - supports-color + - vite + - webpack + + unplugin@0.7.2(esbuild@0.14.54)(rollup@2.79.1)(vite@2.9.15(sass@1.57.1)): + dependencies: + acorn: 8.8.1 + chokidar: 3.5.3 + webpack-sources: 3.2.3 + webpack-virtual-modules: 0.4.6 + optionalDependencies: + esbuild: 0.14.54 + rollup: 2.79.1 + vite: 2.9.15(sass@1.57.1) + + unplugin@1.0.1: + dependencies: + acorn: 8.8.1 + chokidar: 3.5.3 + webpack-sources: 3.2.3 + webpack-virtual-modules: 0.5.0 + + uri-js@4.4.1: + dependencies: + punycode: 2.1.1 + + util-deprecate@1.0.2: {} + + vite-plugin-eslint@1.8.1(eslint@8.31.0)(vite@2.9.15(sass@1.57.1)): + dependencies: + '@rollup/pluginutils': 4.2.1 + '@types/eslint': 8.4.10 + eslint: 8.31.0 + rollup: 2.79.1 + vite: 2.9.15(sass@1.57.1) + + vite@2.9.15(sass@1.57.1): + dependencies: + esbuild: 0.14.54 + postcss: 8.4.21 + resolve: 1.22.1 + rollup: 2.77.3 + optionalDependencies: + fsevents: 2.3.2 + sass: 1.57.1 + + vue-camera-lib@1.0.4: + dependencies: + deviceorientation-js: 1.0.0 + rotate-canvas: 1.0.0 + + vue-demi@0.13.11(vue@3.2.45): + dependencies: + vue: 3.2.45 + + vue-eslint-parser@9.1.0(eslint@8.31.0): + dependencies: + debug: 4.3.4 + eslint: 8.31.0 + eslint-scope: 7.1.1 + eslint-visitor-keys: 3.3.0 + espree: 9.4.1 + esquery: 1.4.0 + lodash: 4.17.21 + semver: 7.3.8 + transitivePeerDependencies: + - supports-color + + vue-i18n@9.2.2(vue@3.2.45): + dependencies: + '@intlify/core-base': 9.2.2 + '@intlify/shared': 9.2.2 + '@intlify/vue-devtools': 9.2.2 + '@vue/devtools-api': 6.4.5 + vue: 3.2.45 + + vue-router@4.1.6(vue@3.2.45): + 
dependencies: + '@vue/devtools-api': 6.4.5 + vue: 3.2.45 + + vue-tsc@0.34.17(typescript@4.9.4): + dependencies: + '@volar/vue-typescript': 0.34.17 + typescript: 4.9.4 + + vue@3.2.45: + dependencies: + '@vue/compiler-dom': 3.2.45 + '@vue/compiler-sfc': 3.2.45 + '@vue/runtime-dom': 3.2.45 + '@vue/server-renderer': 3.2.45(vue@3.2.45) + '@vue/shared': 3.2.45 + + webpack-sources@3.2.3: {} + + webpack-virtual-modules@0.4.6: {} + + webpack-virtual-modules@0.5.0: {} + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + word-wrap@1.2.3: {} + + wrappy@1.0.2: {} + + xml-name-validator@4.0.0: {} + + yallist@4.0.0: {} + + yocto-queue@0.1.0: {} diff --git a/robot_painting/robot-sketch-vue/public/ROSC_logo2_short - mini.ico b/robot_painting/robot-sketch-vue/public/ROSC_logo2_short - mini.ico new file mode 100644 index 0000000000000000000000000000000000000000..d8ec622d56db53ec20bfd2e64724b22650fea7f0 Binary files /dev/null and b/robot_painting/robot-sketch-vue/public/ROSC_logo2_short - mini.ico differ diff --git a/robot_painting/robot-sketch-vue/public/favicon.ico b/robot_painting/robot-sketch-vue/public/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..df36fcfb72584e00488330b560ebcf34a41c64c2 Binary files /dev/null and b/robot_painting/robot-sketch-vue/public/favicon.ico differ diff --git a/robot_painting/robot-sketch-vue/src/App.vue b/robot_painting/robot-sketch-vue/src/App.vue new file mode 100644 index 0000000000000000000000000000000000000000..70473d9033faef6a2eaeec18f51ad826fc7e4f40 --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/App.vue @@ -0,0 +1,24 @@ + + + + + diff --git a/robot_painting/robot-sketch-vue/src/apis/request.ts b/robot_painting/robot-sketch-vue/src/apis/request.ts new file mode 100644 index 0000000000000000000000000000000000000000..ecdc53aaf36b08538fe6abe49be2051836929b5a --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/apis/request.ts @@ -0,0 +1,43 @@ +import http from '@/utils/http' + +// Upload an image for people matting (background removal) +export const matting = (data: Blob | string) => { + const formData = new FormData() + formData.append('image', data) + formData.append('model', 'people') + + return http.post('matting', formData, { + headers: { + 'Content-Type': 'multipart/form-data', + }, + }); +} + +// Fetch a finished matting result as a binary blob +export const mattingResult = (resultid: string) => { + return http.get(resultid, {responseType: 'blob'}); +} + + +// Request a sketch (stroke sequence) for an uploaded image +export const robotSketch = (data: Blob | string) => { + const formData = new FormData() + formData.append('image_path', data) + + return http.post('robot/sketch', formData, { + headers: { + 'Content-Type': 'multipart/form-data', + }, + }); +} + +// Send a stroke-sequence file to the robot to start drawing +export const robotDrawing = (seq_file: Blob | string) => { + const formData = new FormData() + formData.append('seq_data_file', seq_file) + return http.post('robot/drawing', formData, { + headers: { + 'Content-Type': 'multipart/form-data', + }, + }); +} diff --git a/robot_painting/robot-sketch-vue/src/apis/user.ts b/robot_painting/robot-sketch-vue/src/apis/user.ts new file mode 100644 index 0000000000000000000000000000000000000000..f90724fdfca70566ea2cc15df93d50ccb837a64a --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/apis/user.ts @@ -0,0 +1,6 @@ +import http from '@/utils/http' + +const basePath = 'user/' + +// log out +export const logout = () => http.post(`${basePath}logout`) diff --git a/robot_painting/robot-sketch-vue/src/assets/logo.png b/robot_painting/robot-sketch-vue/src/assets/logo.png new file mode 100644 index 
0000000000000000000000000000000000000000..f3d2503fc2a44b5053b0837ebea6e87a2d339a43 Binary files /dev/null and b/robot_painting/robot-sketch-vue/src/assets/logo.png differ diff --git a/robot_painting/robot-sketch-vue/src/assets/rosc.png b/robot_painting/robot-sketch-vue/src/assets/rosc.png new file mode 100644 index 0000000000000000000000000000000000000000..0388a79528a0c7afeaf247a1bed55a890f234f30 Binary files /dev/null and b/robot_painting/robot-sketch-vue/src/assets/rosc.png differ diff --git a/robot_painting/robot-sketch-vue/src/env.d.ts b/robot_painting/robot-sketch-vue/src/env.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..aafef9509dd5c4364df3b0e29457a63e0c87011b --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/env.d.ts @@ -0,0 +1,8 @@ +/// <reference types="vite/client" /> + +declare module '*.vue' { + import type { DefineComponent } from 'vue' + // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/ban-types + const component: DefineComponent<{}, {}, any> + export default component +} diff --git a/robot_painting/robot-sketch-vue/src/layout/components/language/index.vue b/robot_painting/robot-sketch-vue/src/layout/components/language/index.vue new file mode 100644 index 0000000000000000000000000000000000000000..8b30a4573118a4b62fd9f8afa04cf26ff1bbfe68 --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/layout/components/language/index.vue @@ -0,0 +1,39 @@ + + + + + diff --git a/robot_painting/robot-sketch-vue/src/layout/components/setting/index.vue b/robot_painting/robot-sketch-vue/src/layout/components/setting/index.vue new file mode 100644 index 0000000000000000000000000000000000000000..1cb71230a899ecdffe2d8f486949ed2a1e513619 --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/layout/components/setting/index.vue @@ -0,0 +1,76 @@ + + + + diff --git a/robot_painting/robot-sketch-vue/src/layout/components/sidebar/breadCrumb.vue b/robot_painting/robot-sketch-vue/src/layout/components/sidebar/breadCrumb.vue new file mode 100644 index 0000000000000000000000000000000000000000..e5435363c09c4e4cd68e4ea181feff37ce00dc98 --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/layout/components/sidebar/breadCrumb.vue @@ -0,0 +1,55 @@ + + + + + diff --git a/robot_painting/robot-sketch-vue/src/layout/components/sidebar/hamburger.vue b/robot_painting/robot-sketch-vue/src/layout/components/sidebar/hamburger.vue new file mode 100644 index 0000000000000000000000000000000000000000..d251e08ea0fd0b882476c04b7baf8b2be22de2c4 --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/layout/components/sidebar/hamburger.vue @@ -0,0 +1,25 @@ + + + + + diff --git a/robot_painting/robot-sketch-vue/src/layout/components/sidebar/menu/index.vue b/robot_painting/robot-sketch-vue/src/layout/components/sidebar/menu/index.vue new file mode 100644 index 0000000000000000000000000000000000000000..ddb567f5ecf5fd9343aaed324b1e3fe55c1eaf39 --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/layout/components/sidebar/menu/index.vue @@ -0,0 +1,110 @@ + + + + diff --git a/robot_painting/robot-sketch-vue/src/layout/components/sidebar/menu/subMenu.vue b/robot_painting/robot-sketch-vue/src/layout/components/sidebar/menu/subMenu.vue new file mode 100644 index 0000000000000000000000000000000000000000..3cbe2f0679d4e3cbca24ea0d605384875aef945b --- /dev/null +++ b/robot_painting/robot-sketch-vue/src/layout/components/sidebar/menu/subMenu.vue @@ -0,0 +1,30 @@ + + + diff --git a/robot_painting/robot-sketch-vue/src/layout/hooks/useNav.ts b/robot_painting/robot-sketch-vue/src/layout/hooks/useNav.ts 
diff --git a/robot_painting/robot-sketch-vue/src/layout/hooks/useNav.ts b/robot_painting/robot-sketch-vue/src/layout/hooks/useNav.ts
new file mode 100644
index 0000000000000000000000000000000000000000..983c7512d78bd22f607eaf454d32fc05d4ccc095
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/layout/hooks/useNav.ts
@@ -0,0 +1,15 @@
+import { computed } from 'vue'
+import { useAppStoreHook } from '@/store/modules/app'
+
+const app = useAppStoreHook()
+const toggleSideBar = app.TOGGLE_SIDEBAR
+const isCollapse = computed(() => {
+  return !app.getSidebarStatus
+})
+
+export function useNav() {
+  return {
+    toggleSideBar,
+    isCollapse
+  }
+}
diff --git a/robot_painting/robot-sketch-vue/src/layout/hooks/useTheme.ts b/robot_painting/robot-sketch-vue/src/layout/hooks/useTheme.ts
new file mode 100644
index 0000000000000000000000000000000000000000..12d99bafb40ccfaf312ef143d9d1baa65cb88ff0
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/layout/hooks/useTheme.ts
@@ -0,0 +1,29 @@
+import { computed } from 'vue'
+import { useDark, useToggle } from '@vueuse/core'
+import { useAppStoreHook } from '@/store/modules/app'
+
+const app = useAppStoreHook()
+const isDark = useDark()
+const toggleDark = useToggle(isDark)
+
+const themeColor = computed(() => app.color)
+const themeList = [
+  '#0960BD',
+  '#0084F4',
+  '#009688',
+  '#536DF3',
+  '#FF5C93',
+  '#EE4F12',
+  '#0096C7',
+  '#9C27B0',
+  '#FF9800'
+]
+const changeTheme = (color: string) => {
+  if (!color) return
+  const el = document.documentElement
+  // set the CSS variable that drives the Element Plus primary color
+  el.style.setProperty('--el-color-primary', color)
+  useAppStoreHook().SET_COLOR(color)
+}
+
+export { isDark, toggleDark, themeColor, themeList, changeTheme }
diff --git a/robot_painting/robot-sketch-vue/src/layout/index.vue b/robot_painting/robot-sketch-vue/src/layout/index.vue
new file mode 100644
index 0000000000000000000000000000000000000000..c527ec488d6003bc088720ba28358d2f76cb0cc0
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/layout/index.vue
@@ -0,0 +1,55 @@
+
+
+
+
diff --git a/robot_painting/robot-sketch-vue/src/locales/en.ts b/robot_painting/robot-sketch-vue/src/locales/en.ts
new file mode 100644
index 0000000000000000000000000000000000000000..9f5cbe091b089fa8077f11106a9bfc8fcda37f7e
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/locales/en.ts
@@ -0,0 +1,12 @@
+export default {
+  buttons: {
+    login: 'Login',
+    logout: 'Logout'
+  },
+  menus: {
+    home: 'Home'
+  },
+  txts: {
+    projectConfiguration: 'Project Configuration'
+  }
+}
diff --git a/robot_painting/robot-sketch-vue/src/locales/index.ts b/robot_painting/robot-sketch-vue/src/locales/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..c7569739193b9941dfb2d2d6155e3ee6dbf8a79a
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/locales/index.ts
@@ -0,0 +1,20 @@
+import { createI18n } from 'vue-i18n'
+import { storageLocal } from '@/utils/storage'
+import zhCn from './zh-cn'
+import en from './en'
+
+// merge the locale messages
+const messages = {
+  zhCn,
+  en
+}
+
+// create the i18n instance
+const i18n = createI18n({
+  legacy: false,
+  globalInjection: true, // global injection, so $t can be used directly in templates
+  locale: storageLocal.getItem('app')?.locale || 'zhCn',
+  messages: messages
+})
+
+export default i18n
diff --git a/robot_painting/robot-sketch-vue/src/locales/zh-cn.ts b/robot_painting/robot-sketch-vue/src/locales/zh-cn.ts
new file mode 100644
index 0000000000000000000000000000000000000000..8d52f5e609d53e37c25cf23e1458c3b510edb915
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/locales/zh-cn.ts
@@ -0,0 +1,12 @@
+export default {
+  buttons: {
+    login: '登录',
+    logout: '登出'
+  },
+  menus: {
+    home: '首页'
+  },
+  txts: {
+    projectConfiguration: '项目设置'
+  }
+}
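Together, the i18n setup and the theme hook give the layout two persisted user preferences. A minimal usage sketch, assuming it runs where Pinia and vue-i18n are installed (as done in `main.ts` below); `applyPreferences` is a hypothetical helper, not part of this patch:

```ts
import { useAppStoreHook } from '@/store/modules/app'
import { changeTheme, themeList } from '@/layout/hooks/useTheme'

// Switch UI language and primary color in one call; both survive a reload
// because SET_LOCALE and SET_COLOR write through to localStorage.
export function applyPreferences(locale: 'zhCn' | 'en', colorIndex = 0) {
  useAppStoreHook().SET_LOCALE(locale)               // updates vue-i18n and persists
  changeTheme(themeList[colorIndex] ?? themeList[0]) // sets --el-color-primary
}
```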
diff --git a/robot_painting/robot-sketch-vue/src/main.ts b/robot_painting/robot-sketch-vue/src/main.ts
new file mode 100644
index 0000000000000000000000000000000000000000..b2d2d1fe584d552c277a59dd26aabaee62181e9b
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/main.ts
@@ -0,0 +1,16 @@
+import { createApp } from 'vue'
+import App from './App.vue'
+import router from './router'
+import { setupStore } from './store'
+import i18n from './locales'
+import 'element-plus/theme-chalk/dark/css-vars.css'
+import ElementPlus from 'element-plus'
+
+
+const app = createApp(App)
+app.use(ElementPlus)
+app.use(router)
+app.use(i18n)
+setupStore(app)
+
+app.mount('#app')
diff --git a/robot_painting/robot-sketch-vue/src/router/index.ts b/robot_painting/robot-sketch-vue/src/router/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..26dc922ab9ae8434ef7d707ebfed2210514050b7
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/router/index.ts
@@ -0,0 +1,98 @@
+import { createRouter, createWebHistory, RouteRecordRaw } from 'vue-router'
+import 'nprogress/nprogress.css'
+import NProgress from 'nprogress'
+import { getUser } from '@/utils/auth'
+
+const routes: RouteRecordRaw[] = [
+  {
+    path: '/',
+    name: 'layout',
+    redirect: {
+      path: '/home'
+    },
+    component: () => import('@/layout/index.vue'),
+    children: [
+      {
+        path: '/home',
+        name: 'home',
+        component: () => import('@/views/sketch/index.vue'),
+        meta: {
+          requireAuth: false
+        }
+      },
+      {
+        path: '/psychology',
+        name: 'psychology',
+        component: () => import('@/views/psychology.vue'),
+        meta: {
+          requireAuth: false
+        }
+      },
+      {
+        path: 'sketch',
+        name: 'sketch',
+        component: () => import('@/views/sketch/index.vue'),
+        meta: {
+          requireAuth: false
+        }
+      },
+      {
+        path: '/fiction',
+        name: 'fiction',
+        component: () => import('@/views/noval/fiction.vue'),
+        meta: {
+          requireAuth: false
+        }
+      },
+      {
+        path: '/fictionDetail',
+        name: 'fictionDetail',
+        component: () => import('@/views/noval/fictionDetail.vue'),
+        meta: {
+          requireAuth: false,
+          parentMenu: 'fiction'
+        }
+      }
+    ]
+  },
+  {
+    path: '/login',
+    name: 'login',
+    component: () => import('@/views/login.vue')
+  },
+  {
+    path: '/:pathMatch(.*)',
+    name: 'error',
+    component: () => import('@/views/error.vue')
+  }
+]
+
+const router = createRouter({
+  history: createWebHistory(import.meta.env.VITE_PUBLIC_PATH),
+  routes
+})
+
+// route guard
+router.beforeEach((to, from, next) => {
+  NProgress.start()
+
+  const token: string | null = getUser('token')
+  if (token) {
+    to.path === '/login' ? next('/') : next()
+    return
+  }
+  if (to.meta.requireAuth) {
+    next({
+      path: '/login',
+      replace: true
+    })
+    return
+  }
+  next()
+})
+
+router.afterEach(() => {
+  NProgress.done()
+})
+
+export default router
diff --git a/robot_painting/robot-sketch-vue/src/store/index.ts b/robot_painting/robot-sketch-vue/src/store/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..eae2b3ca211c6adc38eed1e4ea7201118fe8c716
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/store/index.ts
@@ -0,0 +1,9 @@
+import type { App } from 'vue'
+import { createPinia } from 'pinia'
+const store = createPinia()
+
+export function setupStore(app: App) {
+  app.use(store)
+}
+
+export { store }
diff --git a/robot_painting/robot-sketch-vue/src/store/modules/app.ts b/robot_painting/robot-sketch-vue/src/store/modules/app.ts
new file mode 100644
index 0000000000000000000000000000000000000000..8c0af8e7ba7850e361795f7a293d026784fe827b
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/store/modules/app.ts
@@ -0,0 +1,43 @@
+import { defineStore } from 'pinia'
+import { store } from '@/store'
+import i18n from '@/locales'
+import { storageLocal } from '@/utils/storage'
+import { appType } from './types'
+
+const useAppStore = defineStore('app', {
+  state: (): appType => {
+    const app = storageLocal.getItem('app')
+    const el = document.documentElement
+    const themeColor = getComputedStyle(el).getPropertyValue(`--el-color-primary`)
+    return {
+      sidebar: {
+        opened: true
+      },
+      locale: app?.locale || 'zhCn',
+      color: app?.color || themeColor
+    }
+  },
+  getters: {
+    getSidebarStatus(): boolean {
+      return this.sidebar.opened
+    }
+  },
+  actions: {
+    TOGGLE_SIDEBAR() {
+      this.sidebar.opened = !this.sidebar.opened
+    },
+    SET_LOCALE(locale: string) {
+      this.locale = locale
+      i18n.global.locale.value = locale
+      storageLocal.setObjItem('app', { locale })
+    },
+    SET_COLOR(color: string) {
+      this.color = color
+      storageLocal.setObjItem('app', { color })
+    }
+  }
+})
+
+export function useAppStoreHook() {
+  return useAppStore(store)
+}
diff --git a/robot_painting/robot-sketch-vue/src/store/modules/types.ts b/robot_painting/robot-sketch-vue/src/store/modules/types.ts
new file mode 100644
index 0000000000000000000000000000000000000000..8f675d2c67f82f5a3f4b08e8f99504841a96a7ef
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/store/modules/types.ts
@@ -0,0 +1,7 @@
+export type appType = {
+  sidebar: {
+    opened: boolean
+  }
+  locale: string
+  color: string
+}
diff --git a/robot_painting/robot-sketch-vue/src/store/modules/user.ts b/robot_painting/robot-sketch-vue/src/store/modules/user.ts
new file mode 100644
index 0000000000000000000000000000000000000000..115135b1d79e7cf0bb49c0705e26167a581a3cf2
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/store/modules/user.ts
@@ -0,0 +1,31 @@
+import { defineStore } from 'pinia'
+import { store } from '@/store'
+import { userType } from '@/types'
+import { getUser } from '@/utils/auth'
+
+const useUserStore = defineStore('user', {
+  state: () => {
+    return {
+      token: getUser('token'),
+      hasPermission: !!getUser('token'),
+      userId: getUser('userId')
+    }
+  },
+  actions: {
+    SET_USER_INFO(data: userType) {
+      const { token, userId } = data
+      this.token = token
+      this.hasPermission = !!token
+      this.userId = userId
+    },
+    REMOVE_USER_INFO() {
+      this.token = null
+      this.hasPermission = false
+      this.userId = null
+    }
+  }
+})
+
+export function useUserStoreHook() {
+  return useUserStore(store)
+}
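The guard above only checks for a persisted token: with a token, `/login` redirects home; without one, only routes carrying `meta.requireAuth` force a login. A sketch of a login flow that satisfies it; `/user/login` is a hypothetical endpoint (the real API path is not part of this diff), and `http` and `setUser` are defined later in this patch:

```ts
import http from '@/utils/http'
import { setUser } from '@/utils/auth'
import router from '@/router'

// Hypothetical login flow: persist the token that beforeEach() looks for,
// then navigate; the guard will now let every route through.
export async function login(userName: string, password: string) {
  const { data } = await http.post('/user/login', { userName, password }) // assumed endpoint
  setUser({ token: data.token, userName, userId: data.userId })
  router.push('/')
}
```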
diff --git a/robot_painting/robot-sketch-vue/src/style/common.scss b/robot_painting/robot-sketch-vue/src/style/common.scss
new file mode 100644
index 0000000000000000000000000000000000000000..8e8760da766bad863a6f9d6db885fe6f50fbbc32
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/style/common.scss
@@ -0,0 +1,51 @@
+// text helpers
+// font size
+@mixin font($num) {
+  font-size: $num + px;
+}
+// show at most N lines of text; overflow is hidden behind an ellipsis
+@mixin line-clamp($num: 1, $width: 100%) {
+  width: $width;
+  word-break: break-all;
+  overflow: hidden;
+  -ms-text-overflow: ellipsis;
+  text-overflow: ellipsis;
+  display: -webkit-box;
+  -webkit-line-clamp: $num;
+  -webkit-box-orient: vertical;
+}
+
+// layout helpers
+// flex layout, centered by default
+@mixin flex($justify: center, $align: center) {
+  display: -webkit-box;
+  display: -moz-box;
+  display: -webkit-flex;
+  display: -moz-flex;
+  display: -ms-flexbox;
+  display: flex;
+  justify-content: $justify; // flex-start | flex-end | center | space-between | space-around
+  align-items: $align; // flex-start | flex-end | center | baseline | stretch
+}
+// absolute positioning, centered
+@mixin position-center {
+  position: absolute;
+  top: 50%;
+  left: 50%;
+  -webkit-transform: translate(-50%, -50%);
+  -moz-transform: translate(-50%, -50%);
+  -ms-transform: translate(-50%, -50%);
+  -o-transform: translate(-50%, -50%);
+  transform: translate(-50%, -50%);
+}
+// float
+@mixin float($layout: left) {
+  float: $layout;
+}
+
+// misc
+// set width and height
+@mixin wh($width, $height) {
+  width: $width + px;
+  height: $height + px;
+}
diff --git a/robot_painting/robot-sketch-vue/src/style/index.scss b/robot_painting/robot-sketch-vue/src/style/index.scss
new file mode 100644
index 0000000000000000000000000000000000000000..a667f27fa2328c03f866ef17999cc39aef06e567
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/style/index.scss
@@ -0,0 +1,32 @@
+@import './common.scss';
+
+html,
+body,
+#app {
+  width: 100%;
+  height: 100%;
+  overflow: hidden;
+  font-size: 12px;
+  font-family: Microsoft Yahei, PingFangSC-Semibold, PingFang SC !important;
+}
+
+.icon-hover {
+  cursor: pointer;
+  font-size: 16px;
+  :hover {
+    color: var(--el-color-primary);
+  }
+}
+
+// element
+.el-aside {
+  border-right: solid 1px var(--el-menu-border-color);
+  .el-menu {
+    border-right: none;
+  }
+}
+
+// nprogress
+#nprogress .spinner {
+  display: none !important;
+}
diff --git a/robot_painting/robot-sketch-vue/src/types/index.ts b/robot_painting/robot-sketch-vue/src/types/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..9e22dffae60393760081c671404a2a98772d2618
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/types/index.ts
@@ -0,0 +1,5 @@
+export type userType = {
+  token: string
+  userName: string
+  userId: number
+}
diff --git a/robot_painting/robot-sketch-vue/src/utils/auth/index.ts b/robot_painting/robot-sketch-vue/src/utils/auth/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..12fb34779ad7b7b76825d948d31f8c4446d851d2
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/utils/auth/index.ts
@@ -0,0 +1,31 @@
+import { useUserStoreHook } from '@/store/modules/user'
+import { userType } from '@/types'
+import { storageLocal } from '@/utils/storage'
+import config from '../../../config'
+
+const prefix = config.PROJECT_NAME
+
+export function getUser(key: string) {
+  const user = storageLocal.getItem('user') || {}
+  return user[key]
+}
+
+// save user info
+export function setUser(data: userType) {
+  storageLocal.setItem('user', data)
+  useUserStoreHook().SET_USER_INFO(data)
+}
+
+// remove user info
+export function removeUser() {
+  storageLocal.removeItem('user')
+  useUserStoreHook().REMOVE_USER_INFO()
+}
+
+// clear the user store when localStorage is emptied by hand
+window.addEventListener('storage', function (e) {
+  if ((!e.key || e.key === `${prefix}-user`) && !e.newValue) {
+    useUserStoreHook().REMOVE_USER_INFO()
+    window.location.reload()
+  }
+})
diff --git a/robot_painting/robot-sketch-vue/src/utils/http/index.ts b/robot_painting/robot-sketch-vue/src/utils/http/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..56a8f14177c972ab7964a600d628a73267e4a69e
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/utils/http/index.ts
@@ -0,0 +1,74 @@
+import axios from 'axios'
+import router from '@/router'
+import { useUserStoreHook } from '@/store/modules/user'
+import { removeUser } from '@/utils/auth'
+import { message, messageBox } from '@/utils/message'
+
+enum Msgs {
+  '操作成功' = 200,
+  '无权操作' = 401,
+  '系统内部错误' = 500
+}
+
+// avoid stacking several dialogs when multiple requests return 401 at once
+let isRefreshing = false
+
+const { DEV, VITE_PROXY_DOMAIN, VITE_PROXY_DOMAIN_REAL } = import.meta.env
+// create the axios instance
+const instance = axios.create({
+  baseURL: DEV ? VITE_PROXY_DOMAIN : VITE_PROXY_DOMAIN_REAL,
+  timeout: 20000,
+  headers: {
+    'Content-Type': 'application/json;charset=UTF-8'
+  }
+})
+
+// request interceptor
+instance.interceptors.request.use((config) => {
+  config.headers = config.headers || {}
+  const token = useUserStoreHook().token
+  if (token) {
+    config.headers['User-Token'] = token
+  }
+  return config
+})
+
+// response interceptor
+instance.interceptors.response.use(
+  (res) => res,
+  (err) => {
+    const res = err.response
+    const code: number = res.status
+    if (res.config.dontShowToast) {
+      return Promise.reject(res)
+    }
+    if (code === 401) {
+      if (isRefreshing) {
+        return Promise.reject(res)
+      }
+      isRefreshing = true
+      removeUser()
+      messageBox({
+        title: Msgs[code],
+        message: '您暂无操作权限,请登录'
+      })
+        .then(() => {
+          router.push('/login')
+        })
+        .finally(() => (isRefreshing = false))
+      return Promise.reject(res)
+    }
+    message({ message: Msgs[code] || '请求失败', type: 'error' })
+    return Promise.reject(res)
+  }
+)
+
+const http = {
+  get: (url = '', params = {}) => instance.get(url, { params }),
+  post: (url = '', data = {}, config = {}) => instance.post(url, data, config),
+  put: (url = '', data = {}) => instance.put(url, data),
+  // axios delete takes a config object, so a request body must be wrapped in { data }
+  delete: (url = '', data = {}) => instance.delete(url, { data }),
+  patch: (url = '', data = {}) => instance.patch(url, data)
+}
+
+export default http
diff --git a/robot_painting/robot-sketch-vue/src/utils/message/index.ts b/robot_painting/robot-sketch-vue/src/utils/message/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..4658ffaa69bbef298e4bc96da4b94f26eaeff6b3
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/utils/message/index.ts
@@ -0,0 +1,13 @@
+import { ElMessage, ElMessageBox } from 'element-plus'
+
+// toast message
+const message = ({ message = '', type = 'warning' }) => {
+  return ElMessage({ message, type })
+}
+
+// message box
+const messageBox = ({ title = '', message = '', type = 'warning' }) => {
+  return ElMessageBox({ title, message, type })
+}
+
+export { message, messageBox }
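Callers of the `http` wrapper above mostly handle the happy path only, since the response interceptor already toasts errors and redirects on 401. A minimal sketch; `/api/robot/list` is a hypothetical endpoint chosen to match the dev proxy configured in `vite.config.ts` further below:

```ts
import http from '@/utils/http'

// Errors are toasted by the interceptor, so a plain await is usually enough;
// a 401 additionally clears the stored user and offers to navigate to /login.
export async function fetchRobotList(page: number) {
  const res = await http.get('/api/robot/list', { page }) // params become ?page=...
  return res.data
}
```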
diff --git a/robot_painting/robot-sketch-vue/src/utils/storage/index.ts b/robot_painting/robot-sketch-vue/src/utils/storage/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..bc004e513fa9280887b1fd8111e890e930d3806a
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/utils/storage/index.ts
@@ -0,0 +1,59 @@
+import config from '../../../config'
+
+const prefix = config.PROJECT_NAME
+
+interface ProxyStorage {
+  getItem(key: string): any
+  setItem(key: string, value: string): void
+  removeItem(key: string): void
+  clear(): void
+}
+
+// sessionStorage wrapper
+class sessionStorageProxy implements ProxyStorage {
+  protected storage: ProxyStorage
+
+  constructor(storageModel: ProxyStorage) {
+    this.storage = storageModel
+  }
+
+  // set
+  public setItem(key: string, value: any): void {
+    this.storage.setItem(`${prefix}-${key}`, JSON.stringify(value))
+  }
+
+  // get
+  public getItem(key: string): any {
+    return JSON.parse(this.storage.getItem(`${prefix}-${key}`))
+  }
+
+  // remove
+  public removeItem(key: string): void {
+    this.storage.removeItem(`${prefix}-${key}`)
+  }
+
+  // clear every key carrying this project's prefix
+  public clear(): void {
+    Object.keys(this.storage).forEach((key) => {
+      if (key.startsWith(prefix)) this.storage.removeItem(key)
+    })
+    // this.storage.clear()
+  }
+
+  // merge-update an object item
+  public setObjItem(key: string, item: object): any {
+    const data = this.getItem(key) || {}
+    this.setItem(key, { ...data, ...item })
+  }
+}
+
+// localStorage wrapper
+class localStorageProxy extends sessionStorageProxy implements ProxyStorage {
+  constructor(localStorage: ProxyStorage) {
+    super(localStorage)
+  }
+}
+
+export const storageSession = new sessionStorageProxy(sessionStorage)
+
+export const storageLocal = new localStorageProxy(localStorage)
diff --git a/robot_painting/robot-sketch-vue/src/views/error.vue b/robot_painting/robot-sketch-vue/src/views/error.vue
new file mode 100644
index 0000000000000000000000000000000000000000..0b6745e38cc1d9dc183b2b143fcc64a984805629
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/views/error.vue
@@ -0,0 +1,20 @@
+
+
+
diff --git a/robot_painting/robot-sketch-vue/src/views/home.vue b/robot_painting/robot-sketch-vue/src/views/home.vue
new file mode 100644
index 0000000000000000000000000000000000000000..09d9d0d42e72f4ec7fa654522409967868745569
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/views/home.vue
@@ -0,0 +1,3 @@
+
diff --git a/robot_painting/robot-sketch-vue/src/views/login.vue b/robot_painting/robot-sketch-vue/src/views/login.vue
new file mode 100644
index 0000000000000000000000000000000000000000..85e58f3863b7e95ef55eface08b5170442e98d36
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/views/login.vue
@@ -0,0 +1,3 @@
+
diff --git a/robot_painting/robot-sketch-vue/src/views/noval/fiction.vue b/robot_painting/robot-sketch-vue/src/views/noval/fiction.vue
new file mode 100644
index 0000000000000000000000000000000000000000..3bc3cf6542bfe85b97a7bd0891a45df0e61c2461
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/views/noval/fiction.vue
@@ -0,0 +1,11 @@
+
+
diff --git a/robot_painting/robot-sketch-vue/src/views/noval/fictionDetail.vue b/robot_painting/robot-sketch-vue/src/views/noval/fictionDetail.vue
new file mode 100644
index 0000000000000000000000000000000000000000..d5a978d14c0c2759aef273af919dd79e05fbccf4
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/views/noval/fictionDetail.vue
@@ -0,0 +1,3 @@
+
diff --git a/robot_painting/robot-sketch-vue/src/views/psychology.vue b/robot_painting/robot-sketch-vue/src/views/psychology.vue
new file mode 100644
index 0000000000000000000000000000000000000000..21d71827e2e8a8e984247d7c5a04c013dace2ee9
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/views/psychology.vue
@@ -0,0 +1,3 @@
+
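The storage proxies above namespace every key with the project prefix and JSON-encode values, which is what makes `storageLocal.getItem('app')?.locale` in `locales/index.ts` work. A short usage sketch; the values shown are illustrative only:

```ts
import { storageLocal } from '@/utils/storage'

storageLocal.setItem('app', { locale: 'en', color: '#0960BD' }) // stored under `${prefix}-app`
storageLocal.setObjItem('app', { locale: 'zhCn' })              // merges with what is already there
const app = storageLocal.getItem('app')                         // -> { locale: 'zhCn', color: '#0960BD' }
storageLocal.removeItem('app')
```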
diff --git a/robot_painting/robot-sketch-vue/src/views/sketch/index.vue b/robot_painting/robot-sketch-vue/src/views/sketch/index.vue
new file mode 100644
index 0000000000000000000000000000000000000000..04e2498aab92b7eca50b386397bcb78d6f22f183
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/src/views/sketch/index.vue
@@ -0,0 +1,305 @@
+
+
+
+
diff --git a/robot_painting/robot-sketch-vue/tsconfig.json b/robot_painting/robot-sketch-vue/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..a36f0c9f5c0cb47854785b2d4a22e3a409112e5a
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/tsconfig.json
@@ -0,0 +1,28 @@
+{
+  "compilerOptions": {
+    "target": "esnext",
+    "useDefineForClassFields": true,
+    "module": "esnext",
+    "moduleResolution": "node",
+    "strict": true,
+    "jsx": "preserve",
+    "sourceMap": true,
+    "resolveJsonModule": true,
+    "isolatedModules": true,
+    "esModuleInterop": true,
+    "lib": ["esnext", "dom"],
+    "skipLibCheck": true,
+    "baseUrl": ".",
+    "paths": {
+      "@/*": ["src/*"]
+    }
+  },
+  "include": [
+    "src/**/*.ts",
+    "src/**/*.d.ts",
+    "src/**/*.tsx",
+    "src/**/*.vue",
+    "./auto-imports.d.ts"
+  ],
+  "references": [{ "path": "./tsconfig.node.json" }]
+}
diff --git a/robot_painting/robot-sketch-vue/tsconfig.node.json b/robot_painting/robot-sketch-vue/tsconfig.node.json
new file mode 100644
index 0000000000000000000000000000000000000000..a336f895aab529dbb1f6a5878f2873ad6a78a3da
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/tsconfig.node.json
@@ -0,0 +1,9 @@
+{
+  "compilerOptions": {
+    "composite": true,
+    "module": "esnext",
+    "moduleResolution": "node",
+    "allowSyntheticDefaultImports": true
+  },
+  "include": ["vite.config.ts"]
+}
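Note that the `@` alias is declared twice: once in `tsconfig.json` (`paths`) for the type checker, and once in `vite.config.ts` (`resolve.alias`, below) for the bundler; both must point at `src`, or imports will type-check but fail to build, or vice versa. A sketch of the pairing, assuming the config files above:

```ts
// tsconfig.json:  "paths": { "@/*": ["src/*"] }                  -> language service
// vite.config.ts: alias: { '@': path.resolve(__dirname, 'src') } -> bundler
// With both in place, this import resolves identically in the IDE and the build:
import { useAppStoreHook } from '@/store/modules/app' // -> src/store/modules/app.ts
```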
diff --git a/robot_painting/robot-sketch-vue/vite.config.ts b/robot_painting/robot-sketch-vue/vite.config.ts
new file mode 100644
index 0000000000000000000000000000000000000000..efb137b79123bd06e7f7ee32867fefb6b6dd29fc
--- /dev/null
+++ b/robot_painting/robot-sketch-vue/vite.config.ts
@@ -0,0 +1,114 @@
+import { defineConfig, loadEnv } from 'vite'
+import vue from '@vitejs/plugin-vue'
+import path from 'path'
+import eslintPlugin from 'vite-plugin-eslint'
+import AutoImport from 'unplugin-auto-import/vite' // auto import of APIs
+import Components from 'unplugin-vue-components/vite' // component auto-registration
+import { ElementPlusResolver } from 'unplugin-vue-components/resolvers' // elementPlus
+import Icons from 'unplugin-icons/vite' // icon support
+import IconsResolver from 'unplugin-icons/resolver' // icon support
+
+export default defineConfig(({ mode }) => {
+  const { VITE_PUBLIC_PATH, VITE_PROXY_DOMAIN, VITE_PROXY_DOMAIN_REAL, VITE_PROXY_DOMAIN_REAL2 } = loadEnv(
+    mode,
+    process.cwd()
+  )
+  const domain = {
+    '/api/robot': {
+      target: VITE_PROXY_DOMAIN_REAL2,
+      changeOrigin: true,
+      rewrite: (path) => path.replace(new RegExp('^/api/robot'), '')
+    },
+    [VITE_PROXY_DOMAIN]: {
+      target: VITE_PROXY_DOMAIN_REAL,
+      // ws: true,
+      changeOrigin: true,
+      rewrite: (path) => path.replace(new RegExp(`^${VITE_PROXY_DOMAIN}`), '')
+    }
+  }
+  const proxy = VITE_PROXY_DOMAIN_REAL.length > 0 ? domain : null
+
+  return {
+    base: VITE_PUBLIC_PATH, // base path of the build
+    plugins: [
+      vue(),
+      eslintPlugin({
+        include: ['src/**/*.js', 'src/**/*.vue', 'src/*.js', 'src/*.vue']
+      }),
+      AutoImport({
+        // auto import Vue APIs such as ref, reactive, toRef
+        // imports: ['vue'],
+        eslintrc: {
+          enabled: true
+        },
+        resolvers: [
+          // auto import Element Plus functions such as ElMessage, ElMessageBox (styles included)
+          ElementPlusResolver(),
+          // auto import icon components
+          IconsResolver({
+            prefix: 'Icon'
+          })
+        ]
+      }),
+      Components({
+        resolvers: [
+          // auto import Element Plus components
+          ElementPlusResolver(),
+          // auto register icon components
+          IconsResolver({
+            enabledCollections: ['ep'] // the Element Plus icon collection
+          })
+        ]
+      }),
+      Icons({
+        autoInstall: true
+      })
+    ],
+    // path aliases
+    resolve: {
+      alias: {
+        '@': path.resolve(__dirname, 'src'),
+        'vue-i18n': 'vue-i18n/dist/vue-i18n.cjs.js'
+      }
+    },
+    css: {
+      preprocessorOptions: {
+        scss: {
+          additionalData: `@use "@/style/index.scss" as *;`
+        }
+      }
+    },
+    // dev server options
+    server: {
+      host: '0.0.0.0',
+      port: 8000,
+      open: false,
+      https: false,
+      proxy
+    },
+    // production build options
+    // strip console and debugger statements
+    build: {
+      minify: 'terser',
+      terserOptions: {
+        compress: {
+          drop_console: true,
+          drop_debugger: true
+        }
+      },
+      rollupOptions: {
+        output: {
+          chunkFileNames: 'static/js/[name]-[hash].js',
+          entryFileNames: 'static/js/[name]-[hash].js',
+          assetFileNames: 'static/[ext]/[name]-[hash].[ext]',
+          manualChunks(id) {
+            if (id.includes('node_modules')) {
+              return id.toString().split('node_modules/')[1].split('/')[0].toString()
+            }
+          }
+        }
+      }
+    }
+  }
+})
diff --git a/test_tools/ci_test.sh b/test_tools/ci_test.sh
new file mode 100644
index 0000000000000000000000000000000000000000..701e020b7eab408cc9949cb192f72184e6f860dc
--- /dev/null
+++ b/test_tools/ci_test.sh
@@ -0,0 +1,371 @@
+#!/bin/bash
+
+set -e
+
+# image type under test: tiny or std
+IMAGE_TYPE=
+# target architecture of the image: aarch64 or arm
+BUILD_ARCH=
+# branch the image was built from: master, openEuler-22.03-LTS, openEuler-22.09
+BUILD_BRANCH=
+# directory containing the built images
+IMAGE_OUT_DIR=
+# directory where the test framework is downloaded and executed
+TEST_WORK_DIR=
+
+# test suites to run
+run_suitecase=
+# failing test cases that may be ignored
+ignoreFail=
+# seconds to wait while qemu boots
+qemu_option_wait_time=
+# directory collecting the results of all test suites
+results_path=
+
+# overall test result
+exitCode=0
+
+function printLine() {
+    lineLen=$1
+    for i in $(seq 1 $lineLen); do
+        echo -n "-"
+    done
+    echo " "
+}
+
+function printSpace() {
+    lineLen=$1
+    for i in $(seq 1 $lineLen); do
+        echo -n " "
+    done
+}
+
+function printItem() {
+    suiteName=$1
+    printCase=$2
+    result=$3
+
+    suiteLen=$( echo $suiteName | wc -L )
+    suiteSpaceLen=`expr $suiteMaxLen - $suiteLen`
+    resultLen=$( echo $result | wc -L )
+    resultSpaceLen=`expr 8 - $resultLen`
+
+    caseLen=$( echo $printCase | wc -L )
+    caseSpaceLen=`expr $caseMaxLen - $caseLen`
+    echo -n "| $suiteName"
+    printSpace $suiteSpaceLen
+    echo -n " | $printCase"
+    printSpace $caseSpaceLen
+    echo -n " | $result"
+    printSpace $resultSpaceLen
+    echo "|"
+}
+function result_output() {
+    # print the result of every executed test case
+    suiteMaxLen=$( ls ${results_path} | wc -L )
+    caseMaxLen=8
+    for one_suite in ${run_suitecase[@]}; do
+        tmpSucceedLen=0
+        tmpFailedLen=0
+        tmpMax=0
+        if [ -e ${results_path}/${one_suite}/succeed ]; then
+            tmpSucceedLen=$( ls ${results_path}/${one_suite}/succeed | wc -L )
+        fi
+        if [ -e ${results_path}/${one_suite}/failed ]; then
+            tmpFailedLen=$( ls ${results_path}/${one_suite}/failed | wc -L )
+        fi
+        if [ $tmpSucceedLen -gt $tmpFailedLen ]; then
+            tmpMax=$tmpSucceedLen
+        else
+            tmpMax=$tmpFailedLen
+        fi
+        if [ $tmpMax -gt $caseMaxLen ]; then
+            caseMaxLen=$tmpMax
+        fi
+    done
+    suiteMaxLen=`expr $suiteMaxLen + 1`
+    caseMaxLen=`expr $caseMaxLen + 1`
+    totalLen=`expr $suiteMaxLen + $caseMaxLen + 17`
+
+    printLine $totalLen
+    printItem "testsuite" "testcase" "result"
+    printLine $totalLen
+    for one_suite in ${run_suitecase[@]}; do
+        succeedCases=""
+        failCases=""
+        if [ -e ${results_path}/${one_suite}/succeed ]; then
+            succeedCases=$(ls ${results_path}/${one_suite}/succeed)
+        fi
+        if [ -e ${results_path}/${one_suite}/failed ]; then
+            failCases=$(ls ${results_path}/${one_suite}/failed)
+        fi
+        if [ -n "$succeedCases" ]; then
+            for one_case in $succeedCases; do
+                printItem $one_suite $one_case "succeed"
+            done
+        fi
+        if [ -n "$failCases" ]; then
+            for one_case in $failCases; do
+                if [[ "${ignoreFail[*]}" =~ "${one_case}" ]]; then
+                    printItem $one_suite $one_case "ignore"
+                else
+                    printItem $one_suite $one_case "failed"
+                fi
+            done
+        fi
+    done
+    printLine $totalLen
+}
+
+function test_result_ana() {
+    successNum=0
+    for one_suite in ${run_suitecase[@]}; do
+        checkFail=""
+        if [ -e ${results_path}/${one_suite}/failed ]; then
+            checkFail=$(ls ${results_path}/${one_suite}/failed/)
+        fi
+
+        if [ -e ${results_path}/${one_suite}/succeed ]; then
+            tmpSuccessNum=$(ls ${results_path}/${one_suite}/succeed/ | wc -l)
+            successNum=`expr $successNum + $tmpSuccessNum`
+        fi
+
+        for oneFail in ${checkFail[@]}; do
+            if [[ "${ignoreFail[*]}" =~ "${oneFail}" ]]; then
+                echo "INFO: ignore $oneFail test fail"
+            else
+                echo "ERROR: run $oneFail test fail"
+                exitCode=1
+            fi
+        done
+    done
+
+    if [ $successNum -eq 0 ]; then
+        exitCode=1
+    fi
+
+    result_output
+}
+
+function run_test() {
+    # run the tests
+    pushd "${run_test_dir}/mugen"
+    # install the dependencies needed to execute the tests
+    sh -x dep_install.sh -e
+
+    if [[ $IMAGE_TYPE == "std" ]]; then
+        # pick a free IP for qemu, in case other qemu instances already occupy addresses
+        last_ip_num=$(($RANDOM%250+2))
+        can_ip_use=0
+        for i in {1..10}; do
+            # a failing ping means the address is free
+            ping "192.168.10.${last_ip_num}" -c 1 || can_ip_use=1
+            if [ $can_ip_use -eq 1 ]; then
+                break;
+            fi
+            last_ip_num=$(($RANDOM%250+2))
+        done
+        if [ $can_ip_use -eq 0 ]; then
+            echo "ERROR: can't get a ip set to qemu."
+            exit 1
+        fi
+        last_pwd_num=$(($RANDOM%1000))
+        # start qemu
+        wait_login_str="openEuler Embedded(openEuler Embedded Reference Distro)"
+        if [[ ${BUILD_BRANCH} == "openEuler-22.03-LTS" ]]; then
+            wait_login_str="login:"
+        fi
+
+        sh -x qemu_ctl.sh start --qemu_type "${BUILD_ARCH}" \
+            --passwd "openEuler@${last_pwd_num}" \
+            --host_ip "192.168.10.1" \
+            --qemu_ip "192.168.10.${last_ip_num}" \
+            --put_all \
+            --kernal_img_path "${run_test_dir}/image/${BUILD_ARCH}/zImage" \
+            --initrd_path "${run_test_dir}/image/${BUILD_ARCH}/initrd" \
+            --login_wait_str "${wait_login_str}" \
+            --option_wait_time ${qemu_option_wait_time}
+        rem_run_str="-s"
+        need_env_str=""
+    elif [[ $IMAGE_TYPE == "tiny" ]]; then
+        mkdir -p conf
+        echo '{
+            "NODE": [
+                {
+                    "ID": 1,
+                    "LOCALTION": "local",
+                    "MACHINE": "physical",
+                    "FRAME": "aarch64",
+                    "NIC": "lo0",
+                    "MAC": "",
+                    "IPV4": "127.0.0.1",
+                    "USER": "root",
+                    "PASSWORD": "",
+                    "SSH_PORT": 22,
+                    "BMC_IP": "",
+                    "BMC_USER": "",
+                    "BMC_PASSWORD": ""
+                }
+            ]
+        }'>> conf/env.json
+        export FIND_TINY_DIR=${outputdir}
+        rem_run_str=""
+        need_env_str="-E"
+    fi
+
+    for one_suite in ${run_suitecase[@]}; do
+        # build preparation for the test suite
+        bash mugen.sh -b ${one_suite}
+        # with `set -e` a failing case would abort this script, so wrap the run
+        # in a helper that always exits 0 and collect the results afterwards
+        echo "
+        pushd "${run_test_dir}/mugen"
+        bash mugen.sh -f ${one_suite} ${rem_run_str}
+        popd
+        exit 0
+        ">> ${run_test_dir}/tmp_test_run_${one_suite}.sh
+        sh -x ${run_test_dir}/tmp_test_run_${one_suite}.sh
+        # copy the suite's results
+        if [ -e ${run_test_dir}/mugen/results/${one_suite} ]; then
+            cp -Rrf ${run_test_dir}/mugen/results/${one_suite} ${results_path}/
+        else
+            continue
+        fi
+        rm -rf ${run_test_dir}/tmp_test_run_${one_suite}.sh
+    done
+
+    if [[ $IMAGE_TYPE == "std" ]]; then
+        # stop qemu
+        sh qemu_ctl.sh stop
+    elif [[ $IMAGE_TYPE == "tiny" ]]; then
+        rm -rf conf/env.json
+    fi
+    popd
+}
+function set_param() {
+    IMAGE_TYPE=$1
+    BUILD_ARCH=$2
+    BUILD_BRANCH=$3
+    IMAGE_OUT_DIR=$4
+    TEST_WORK_DIR=$5
+    run_suitecase=$6
+    ignoreFail=$7
+    qemu_option_wait_time=$8
+
+    if [ -z $IMAGE_TYPE ]; then
+        IMAGE_TYPE="std"
+    fi
+    if [ -z $BUILD_ARCH ]; then
+        BUILD_ARCH="aarch64"
+    fi
+    if [ -z $BUILD_BRANCH ]; then
+        BUILD_BRANCH="master"
+    fi
+    if [[ ${BUILD_BRANCH} == "yocto_refactor" || ${BUILD_BRANCH} == "gitee_pages" ]]; then
+        BUILD_BRANCH="master"
+    fi
+    if [ -z $IMAGE_OUT_DIR ]; then
+        IMAGE_OUT_DIR="/usr1/output"
+    fi
+    if [ -z $TEST_WORK_DIR ]; then
+        TEST_WORK_DIR="/usr1/ci_test"
+        mkdir -p $TEST_WORK_DIR
+    fi
+
+    if [ -z $run_suitecase ]; then
+        # default suites to run; identical for both image types today, expected to diverge later
+        if [[ $IMAGE_TYPE == "std" ]]; then
+            run_suitecase=("embedded_security_config_test" "embedded_os_basic_test")
+        elif [[ $IMAGE_TYPE == "tiny" ]]; then
+            run_suitecase=("embedded_tiny_image_test")
+        fi
+    fi
+
+    if [ -z $qemu_option_wait_time ]; then
+        qemu_option_wait_time=180
+    fi
+}
+
+function main() {
+    br_nf_iptab_num=1
+    if [ -e /proc/sys/net/bridge/bridge-nf-call-iptables ]; then
+        br_nf_iptab_num=$(sudo cat /proc/sys/net/bridge/bridge-nf-call-iptables)
+        echo 0 | sudo tee /proc/sys/net/bridge/bridge-nf-call-iptables
+    fi
+
+    set_param "$@"
+
+    run_test_dir="${TEST_WORK_DIR}/test_run_dir_${BUILD_ARCH}_${IMAGE_TYPE}"
+    SDK_INSTALL_PATH=${run_test_dir}/sdk/${BUILD_ARCH}
+
+    # clean up leftovers from a previous run
+    if [[ -e ${run_test_dir}/mugen/qemu_ctl.sh && -e ${run_test_dir}/mugen/conf/qemu_info.json ]]; then
+        sh ${run_test_dir}/mugen/qemu_ctl.sh stop
+    fi
+    # remove the working directories
+    rm -rf ${run_test_dir}/mugen
+    rm -rf ${run_test_dir}/image
+    rm -rf ${SDK_INSTALL_PATH}
+    # create the working directories
+    mkdir -p ${run_test_dir}/sdk
+    mkdir -p ${run_test_dir}/image/${BUILD_ARCH}
+    mkdir -p ${SDK_INSTALL_PATH}
+
+    # locate the images, sorted by time in case the given path contains several
+    zImage_path=$(find ${IMAGE_OUT_DIR} -name "zImage" | xargs ls -ta | head -n 1)
+    initrd_path=$(find ${IMAGE_OUT_DIR} -name "openeuler-image-*qemu-*.rootfs.cpio.gz" | xargs ls -ta)
+    # on 22.09 the search also matches .live images; skip those
+    for one_initrd in ${initrd_path[@]}; do
+        if [[ "${one_initrd}" =~ "live" ]]; then
+            continue
+        else
+            initrd_path=${one_initrd}
+            break
+        fi
+    done
+    cp -r ${zImage_path} ${run_test_dir}/image/${BUILD_ARCH}/zImage
+    cp -r ${initrd_path} ${run_test_dir}/image/${BUILD_ARCH}/initrd
+    # locate and configure the SDK
+    if [[ $IMAGE_TYPE == "std" ]]; then
+        toolchain_path=$(find ${IMAGE_OUT_DIR} -name "openeuler-glibc-*-toolchain-*.sh")
+        cp -r ${toolchain_path} ${run_test_dir}/sdk/toolchain.sh
+
+        # install the sdk
+        sh ${run_test_dir}/sdk/toolchain.sh <