Commit b76a55dd authored by l.kaesberg

Merge branch 'feat/multiobject' into 'master'

Feat/multiobject

See merge request !6
parents 5e91e616 4362c33d
@@ -11,3 +11,10 @@ meshroom-models/*
 projects/*
 /venv/
 /project.md
+/6DPoseEstimationDatasetProvider_Data/
+/models/
+/MonoBleedingEdge/
+/6DPoseEstimationDatasetProvider.exe
+/UnityCrashHandler64.exe
+/UnityPlayer.dll
+/WinPixEventRuntime.dll
# DEEPLABCUT.yaml
#DeepLabCut2.0 Toolbox (deeplabcut.org)
#© A. & M. Mathis Labs
#https://github.com/DeepLabCut/DeepLabCut
#Please see AUTHORS for contributors.
#https://github.com/DeepLabCut/DeepLabCut/blob/master/AUTHORS
#Licensed under GNU Lesser General Public License v3.0
#
# DeepLabCut environment
# FIRST: INSTALL CORRECT DRIVER for GPU, see https://stackoverflow.com/questions/30820513/what-is-the-correct-version-of-cuda-for-my-nvidia-driver/30820690
#
# install: conda env create -f DEEPLABCUT.yaml
# update: conda env update -f DEEPLABCUT.yaml
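# activate: conda activate DEEPLABCUT   (standard conda command, added for completeness)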
name: DEEPLABCUT
channels:
- conda-forge
- defaults
dependencies:
- python=3.8
- pip
- jupyter
- nb_conda
- ffmpeg
- pip:
- "deeplabcut[gui]"
\ No newline at end of file
# install: conda env create -f EasyPose.yaml
# update: conda env update -f EasyPose.yaml
name: EasyPose
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- argon2-cffi=20.1.0=py37hcc03f2d_2
- async_generator=1.10=py_0
- attrs=21.2.0=pyhd8ed1ab_0
- backcall=0.2.0=pyh9f0ad1d_0
- backports=1.0=py_2
- backports.functools_lru_cache=1.6.4=pyhd8ed1ab_0
- blas=1.0=mkl
- bleach=3.3.0=pyh44b312d_0
- ca-certificates=2021.10.26=haa95532_2
- certifi=2021.10.8=py37haa95532_2
- cffi=1.14.5=py37hd8e9650_0
- colorama=0.4.4=pyh9f0ad1d_0
- cpuonly=1.0=0
- cudatoolkit=10.0.130=0
- cudnn=7.6.5=cuda10.0_0
- defusedxml=0.7.1=pyhd8ed1ab_0
- entrypoints=0.3=pyhd8ed1ab_1003
- freetype=2.10.4=hd328e21_0
- geos=3.9.1=h39d44d4_2
- icu=68.1=h0e60522_0
- importlib-metadata=4.4.0=py37h03978a9_0
- ipykernel=5.5.5=py37h7813e69_0
- ipython=7.24.0=py37h7813e69_0
- ipython_genutils=0.2.0=py_1
- ipywidgets=7.6.3=pyhd3deb0d_0
- jedi=0.18.0=py37h03978a9_2
- jinja2=3.0.1=pyhd8ed1ab_0
- jpeg=9d=h8ffe710_0
- jsonschema=3.2.0=pyhd8ed1ab_3
- jupyter=1.0.0=py37h03978a9_6
- jupyter_client=6.1.12=pyhd8ed1ab_0
- jupyter_console=6.4.0=pyhd8ed1ab_0
- jupyter_core=4.7.1=py37h03978a9_0
- jupyterlab_pygments=0.1.2=pyh9f0ad1d_0
- jupyterlab_widgets=1.0.0=pyhd8ed1ab_1
- libblas=3.9.0=9_mkl
- libcblas=3.9.0=9_mkl
- liblapack=3.9.0=9_mkl
- libpng=1.6.37=h1d00b33_2
- libsodium=1.0.18=h8d14728_1
- libtiff=4.2.0=hd0e1b90_0
- libwebp=1.2.0=h2bbff1b_0
- lz4-c=1.9.3=h2bbff1b_1
- markupsafe=2.0.1=py37hcc03f2d_0
- matplotlib-inline=0.1.2=pyhd8ed1ab_2
- mistune=0.8.4=py37hcc03f2d_1003
- mkl=2021.2.0=hb70f87d_389
- mkl-service=2.3.0=py37h2bbff1b_1
- mkl_fft=1.3.0=py37h277e83a_2
- mkl_random=1.2.1=py37hf11a4ad_2
- nb_conda=2.2.1=py37h03978a9_4
- nb_conda_kernels=2.3.1=py37h03978a9_0
- nbclient=0.5.3=pyhd8ed1ab_0
- nbconvert=6.0.7=py37h03978a9_3
- nbformat=5.1.3=pyhd8ed1ab_0
- nest-asyncio=1.5.1=pyhd8ed1ab_0
- ninja=1.10.2=py37h559b2a2_3
- notebook=6.4.0=pyha770c72_0
- olefile=0.46=py37_0
- openssl=1.1.1m=h2bbff1b_0
- packaging=20.9=pyh44b312d_0
- pandas=1.2.4=py37hd77b12b_0
- pandoc=2.14.0.1=h8ffe710_0
- pandocfilters=1.4.2=py_1
- parso=0.8.2=pyhd8ed1ab_0
- pickleshare=0.7.5=py_1003
- pip=21.1.2=pyhd8ed1ab_0
- prometheus_client=0.11.0=pyhd8ed1ab_0
- prompt-toolkit=3.0.18=pyha770c72_0
- prompt_toolkit=3.0.18=hd8ed1ab_0
- pycparser=2.20=pyh9f0ad1d_2
- pygments=2.9.0=pyhd8ed1ab_0
- pyparsing=2.4.7=pyh9f0ad1d_0
- pyqt=5.12.3=py37h03978a9_7
- pyqt-impl=5.12.3=py37hf2a7229_7
- pyqt5-sip=4.19.18=py37hf2a7229_7
- pyqtchart=5.12=py37hf2a7229_7
- pyqtwebengine=5.12.1=py37hf2a7229_7
- pyrsistent=0.17.3=py37hcc03f2d_2
- python=3.7.10=h7840368_100_cpython
- python-dateutil=2.8.1=py_0
- python_abi=3.7=1_cp37m
- pytz=2021.1=pyhd3eb1b0_0
- pywin32=300=py37hcc03f2d_0
- pywinpty=1.1.1=py37h7f67f24_0
- pyzmq=22.1.0=py37hcce574b_0
- qt=5.12.9=h5909a2a_4
- qtconsole=5.1.0=pyhd8ed1ab_0
- qtpy=1.9.0=py_0
- send2trash=1.5.0=py_0
- setuptools=49.6.0=py37h03978a9_3
- shapely=1.7.1=py37heb7c565_4
- six=1.16.0=pyh6c4a22f_0
- sqlite=3.35.5=h8ffe710_0
- tbb=2021.2.0=h2d74725_0
- terminado=0.10.0=py37h03978a9_0
- testpath=0.5.0=pyhd8ed1ab_0
- tk=8.6.11=h2bbff1b_0
- tornado=6.1=py37hcc03f2d_1
- traitlets=5.0.5=py_0
- typing_extensions=3.7.4.3=py_0
- vc=14.2=hb210afc_4
- vs2015_runtime=14.28.29325=h5e1d092_4
- wcwidth=0.2.5=pyh9f0ad1d_2
- webencodings=0.5.1=py_1
- wheel=0.36.2=pyhd3deb0d_0
- widgetsnbextension=3.5.1=py37h03978a9_4
- wincertstore=0.2=py37h03978a9_1006
- winpty=0.4.3=4
- xz=5.2.5=h62dcd97_0
- zeromq=4.3.4=h0e60522_0
- zipp=3.4.1=pyhd8ed1ab_0
- zlib=1.2.11=h62dcd97_1010
- zstd=1.4.9=h19a0ad4_0
- pip:
- absl-py==0.12.0
- astor==0.8.1
- astunparse==1.6.3
- bayesian-optimization==1.2.0
- cachetools==5.0.0
- chardet==4.0.0
- click==8.0.1
- colorcet==3.0.0
- cycler==0.10.0
- cython==0.29.23
- decorator==4.4.2
- deeplabcut==2.2.0.6
- deeplabcut-live==1.0.1
- filterpy==1.4.5
- flatbuffers==2.0
- gast==0.2.2
- google-auth==2.6.0
- google-auth-oauthlib==0.4.6
- google-pasta==0.2.0
- grpcio==1.38.0
- h5py==2.10.0
- idna==2.10
- imageio==2.9.0
- imageio-ffmpeg==0.4.4
- imgaug==0.4.0
- install==1.3.5
- intel-openmp==2021.2.0
- joblib==1.0.1
- keras==2.7.0
- keras-applications==1.0.8
- keras-preprocessing==1.1.2
- kiwisolver==1.3.1
- libclang==13.0.0
- llvmlite==0.34.0
- markdown==3.3.4
- matplotlib==3.1.3
- moviepy==1.0.1
- msgpack==1.0.2
- msgpack-numpy==0.4.7.1
- multipledispatch==0.6.0
- networkx==2.5.1
- numba==0.51.1
- numexpr==2.7.3
- numpy==1.17.5
- oauthlib==3.2.0
- opencv-contrib-python==4.5.5.64
- opencv-python==4.5.5.64
- opencv-python-headless==3.4.9.33
- opt-einsum==3.3.0
- param==1.12.0
- patsy==0.5.1
- pillow==8.2.0
- proglog==0.1.9
- protobuf==3.17.1
- psutil==5.8.0
- py-cpuinfo==5.0.0
- pyasn1==0.4.8
- pyasn1-modules==0.2.8
- pyct==0.4.8
- pyrr==0.10.3
- pywavelets==1.1.1
- pyyaml==5.4.1
- requests==2.25.1
- requests-oauthlib==1.3.1
- rsa==4.8
- ruamel-yaml==0.17.7
- ruamel-yaml-clib==0.2.2
- scikit-image==0.18.1
- scikit-learn==0.24.2
- scipy==1.6.3
- statsmodels==0.12.2
- tables==3.6.1
- tabulate==0.8.9
- tensorboard==2.8.0
- tensorboard-data-server==0.6.1
- tensorboard-plugin-wit==1.8.1
- tensorflow==2.7.1
- tensorflow-estimator==2.7.0
- tensorflow-gpu==1.15.5
- tensorflow-io-gcs-filesystem==0.24.0
- tensorpack==0.9.8
- termcolor==1.1.0
- tf-slim==1.1.0
- threadpoolctl==2.1.0
- tifffile==2021.4.8
- torch==1.11.0+cu113
- torchaudio==0.11.0+cu113
- torchvision==0.12.0+cu113
- tqdm==4.61.0
- urllib3==1.26.5
- werkzeug==2.0.1
- wrapt==1.12.1
- wxpython==4.0.7.post2
import argparse
import json
import os
import sys
from datetime import datetime
from shutil import copy
import PIL.Image
import deeplabcut
import pathlib
from ruamel.yaml import YAML
def get_arguments():
    parser = argparse.ArgumentParser(description="Create and train a model")
group = parser.add_mutually_exclusive_group()
    group.add_argument("-c", '--create', help="Only create the project and dataset, to be trained later", action="store_true")
    group.add_argument("-t", '--train', help="Start training a previously created model", action="store_true")
    group.add_argument("-e", '--export', help="Export a previously trained model", action="store_true")
    parser.add_argument("-n", '--name', help="Name of the project", type=str)
    parser.add_argument("-f", '--configfile', type=str, help="Path to the config file")
return parser.parse_args()
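# Typical invocations (a sketch; the script filename "create_model.py" is an assumption,
# it is not named in this excerpt):
#   python create_model.py -c -n Cube                              # create project and training dataset only
#   python create_model.py -t -f projects/<project>/config.yaml    # train a previously created project
#   python create_model.py -e -f projects/<project>/config.yaml    # export a trained model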
class CreateTrainingDataset:
def __init__(self, scan_object_name=""):
self.individuals = set()
self.bodyparts = []
self.skeleton = []
self.scorer_list = "scorer"
self.bodyparts_list = "bodyparts"
self.individuals_list = "individuals"
self.coords_list = "coords"
self.picture_list = {}
self.config_file = ""
if scan_object_name == "":
print("Please enter the name of the object you want to train")
self.scan_object_name = input()
else:
self.scan_object_name = scan_object_name
def create_project(self, object_name="", project_name=""):
if not os.path.isdir("trainData"):
os.mkdir("trainData")
if not os.path.isdir("objectData"):
os.mkdir("objectData")
if object_name == "":
            print("Copy the training data and object data into the trainData and objectData folders, then press Enter to continue")
input()
# creates the deeplabcut project
current_time = datetime.now().strftime("%H-%M-%S")
if project_name == "":
self.config_file = deeplabcut.create_new_project('Dataset', 'Unity-' + current_time,
[pathlib.Path("video.mp4").absolute()],
working_directory=str(
pathlib.Path().absolute()) + "/projects",
multianimal=True, copy_videos=True)
else:
self.config_file = deeplabcut.create_new_project(project_name, 'Dataset-Unity-' + current_time,
[pathlib.Path("video.mp4").absolute()],
working_directory=str(
pathlib.Path().absolute()) + "/projects",
multianimal=True, copy_videos=True)
def create_dataset(self):
# opens and loads the config file for deeplabcut
print(pathlib.Path(self.config_file))
        with open(pathlib.Path(self.config_file), "r") as file:
            cfg = YAML().load(file)
        with open(pathlib.Path("objectData/" + self.scan_object_name + ".json"), "r") as file:
            data = json.load(file)
for keypoint in data["keypoints"]:
self.bodyparts.append(keypoint["name"])
for bone in data["skeleton"]:
self.skeleton.append((bone["keypoint1"], bone["keypoint2"]))
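        # The objectData JSON is expected to look roughly like this (a sketch inferred
        # from the keys read above; the names are illustrative):
        #   {"keypoints": [{"name": "kp0"}, {"name": "kp1"}],
        #    "skeleton": [{"keypoint1": "kp0", "keypoint2": "kp1"}]}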
index_most_individuals = 0
count_individuals = 0
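        # first pass: find the frame that contains the most instances of the object,
        # so the CSV header can be built with one column pair per individual and keypoint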
for i, data_name in enumerate(os.listdir("trainData")):
            # skip image files; only the JSON data files are processed here
            if "image" in data_name:
                continue
            # open the data file for this frame
            with open(pathlib.Path("trainData/" + data_name), "r") as file:
                data = json.load(file)
count = 0
for item in data["Items"]:
if self.scan_object_name in item["name"]:
count += 1
if count > count_individuals:
count_individuals = count
index_most_individuals = i
for frame_index, data_name in enumerate(os.listdir("trainData")):
            # skip image files; only the JSON data files are processed here
            if "image" in data_name:
                continue
            # open the data file for this frame; the matching image is opened below
            with open(pathlib.Path("trainData/" + data_name), "r") as file:
                data = json.load(file)
image_name = "image" + data_name.split("data")[1].split(".")[0] + ".png"
image = PIL.Image.open("trainData/" + image_name)
# read size of image
image_width, image_height = image.size
for item in data["Items"]:
# search for the scan object item
if self.scan_object_name in item["name"]:
                    # record this individual; if this is the frame with the most individuals, build the CSV header
self.individuals.add(item["name"])
if frame_index == index_most_individuals:
for i, part in enumerate(item["keyPointScreenCoordinates"]):
self.bodyparts_list += "," + self.bodyparts[i]
self.bodyparts_list += "," + self.bodyparts[i]
self.individuals_list += "," + item["name"]
self.individuals_list += "," + item["name"]
self.scorer_list += "," + cfg["scorer"]
self.scorer_list += "," + cfg["scorer"]
self.coords_list += ",x"
self.coords_list += ",y"
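                        # Illustrative header layout for two individuals and two keypoints
                        # (scorer name S taken from the DeepLabCut config; each keypoint
                        # gets one x column and one y column per individual):
                        #   scorer,S,S,S,S,S,S,S,S
                        #   individuals,cube1,cube1,cube1,cube1,cube2,cube2,cube2,cube2
                        #   bodyparts,kp0,kp0,kp1,kp1,kp0,kp0,kp1,kp1
                        #   coords,x,y,x,y,x,y,x,y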
# data for one frame of the training set
if image_name not in self.picture_list:
self.picture_list[image_name] = "labeled-data/video/" + image_name
image_data = ""
                    # map each visible keypoint from normalized screen coordinates to image pixels; invisible keypoints get empty fields
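                    # e.g. a visible keypoint at normalized (0.5, 0.25) in a 640x480 image
                    # maps to pixel (320, 480 - 120) = (320, 360); the y axis is flipped,
                    # presumably because Unity screen coordinates originate at the bottom left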
for coord in item["keyPointScreenCoordinates"]:
if coord["z"] == 1:
image_data += "," + str(coord["x"] * image_width) + "," + str(
image_height - coord["y"] * image_height)
else:
image_data += ",,"
# save image_data
self.picture_list[image_name] += image_data
# copy image to the training folder
copy("trainData/" + image_name,
cfg["project_path"] + "/labeled-data/video")
        # pad each row with empty fields so that every image row has one x,y pair per individual and bodypart
        expected_fields = len(self.individuals) * len(self.bodyparts) * 2
        for picture_path in self.picture_list:
            field_count = self.picture_list[picture_path].count(",")
            self.picture_list[picture_path] += "," * (expected_fields - field_count)
if count_individuals == 0:
print("No key points found for the object!")
sys.exit()
        # write the header rows and one row per image to the training-data CSV
        with open(cfg["project_path"] + "/labeled-data/video/CollectedData_" + cfg["scorer"] + ".csv", "w") as file:
            file.writelines(
                [self.scorer_list, "\n", self.individuals_list, "\n", self.bodyparts_list, "\n", self.coords_list,
                 "\n", "\n".join(self.picture_list.values())])
# edit config for new bodyparts and save
cfg["multianimalbodyparts"] = self.bodyparts
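        # "MULTI!" is the sentinel value DeepLabCut expects in the bodyparts field
        # of a multi-animal config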
cfg["bodyparts"] = "MULTI!"
cfg["individuals"] = list(self.individuals)
cfg["skeleton"] = self.skeleton
        with open(pathlib.Path(self.config_file), "w") as file:
            YAML().dump(cfg, file)
# convert training data file to h5
deeplabcut.convertcsv2h5(self.config_file, False)
def label_images(self, user_input=True):
if user_input:
            print("Do you want to create labeled images to check the key points? (Y/N, default=N)")
if "y" in input().lower():
# create images with labels
deeplabcut.check_labels(self.config_file)
else:
deeplabcut.check_labels(self.config_file)
def create_training_dataset(self):
# create training dataset
deeplabcut.create_training_dataset(self.config_file)
def start_training(self):
try:
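            # displayiters/saveiters control logging and snapshot frequency;
            # allow_growth stops TensorFlow from reserving all GPU memory up front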
deeplabcut.train_network(self.config_file, displayiters=100, saveiters=1000, allow_growth=True)
except KeyboardInterrupt:
print("Stopped Training")
def export_model(self):
deeplabcut.export_model(self.config_file)
if __name__ == '__main__':
args = get_arguments()
if not args.name:
name = ""
else:
name = args.name
if args.create:
dataset = CreateTrainingDataset()
dataset.create_project(project_name=name)
dataset.create_dataset()
dataset.label_images()
dataset.create_training_dataset()
elif args.train:
if not args.configfile:
print("Config file required!")
sys.exit()
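        # a non-empty placeholder name skips the interactive object-name prompt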
dataset = CreateTrainingDataset(" ")
dataset.config_file = os.path.abspath(args.configfile)
dataset.start_training()
dataset.export_model()
elif args.export:
if not args.configfile:
print("Config file required!")
sys.exit()
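        # a non-empty placeholder name skips the interactive object-name prompt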
dataset = CreateTrainingDataset(" ")
dataset.config_file = os.path.abspath(args.configfile)
dataset.export_model()
else:
dataset = CreateTrainingDataset()
dataset.create_project(project_name=name)
dataset.create_dataset()
dataset.label_images()
dataset.create_training_dataset()
dataset.start_training()
dataset.export_model()
@@ -17,21 +17,27 @@ def get_arguments():
     parser = argparse.ArgumentParser("Create and train a model")
     group = parser.add_mutually_exclusive_group()
     group.add_argument("-c", '--create', help="Only create the model to train later", action="store_true")
-    group.add_argument("-t", '--train', help="Start train a previous created model", action="store_true")
-    group.add_argument("-e", '--export', help="Export a previous trained model", action="store_true")
-    group.add_argument("-n", '--name', help="Name of the project", type=str)
-    parser.add_argument("-f", '--configfile', type=str, help="Path to the config file")
+    group.add_argument("-t", '--train', help="Start training a previously created model (argument: path to the config file)",
+                       type=str)
+    group.add_argument("-e", '--export', help="Export a previously trained model (argument: path to the config file)",
+                       type=str)
+    parser.add_argument("-n", '--name', help="Name of the object that gets tracked", type=str)
+    parser.add_argument("-l", '--label', help="Create labeled images for the dataset", action="store_true")
+    parser.add_argument("-m", '--multiobject', help="Dataset is for a multi-object project", action="store_true")
+    parser.add_argument("-b", '--batchsize', help="Batch size used for training", type=int)
+    parser.add_argument("-r", '--ready', help="Skip the interruption for copying files", action="store_true")
     return parser.parse_args()
 class CreateTrainingDataset:
     def __init__(self, scan_object_name=""):
+        self.individuals = set()
         self.bodyparts = []
+        self.skeleton = []
         self.scorer_list = "scorer"
         self.bodyparts_list = "bodyparts"
+        self.individuals_list = "individuals"
         self.coords_list = "coords"
-        self.picture_list = []
-        self.first_file = True
         self.config_file = ""
         if scan_object_name == "":
@@ -40,12 +46,12 @@ class CreateTrainingDataset:
         else:
             self.scan_object_name = scan_object_name
-    def create_project(self, object_name="", project_name=""):
+    def create_project(self, skip=False, project_name="", multianimal=False):
         if not os.path.isdir("trainData"):
             os.mkdir("trainData")
         if not os.path.isdir("objectData"):
             os.mkdir("objectData")
-        if object_name == "":
+        if not skip:
             print("Copy train data and object data to the trainData/objectData Folder and press enter to continue")
             input()
         # creates the deeplabcut project
@@ -55,16 +61,17 @@ class CreateTrainingDataset:
                                                               [pathlib.Path("video.mp4").absolute()],
                                                               working_directory=str(
                                                                   pathlib.Path().absolute()) + "/projects",
-                                                              multianimal=False, copy_videos=True)
+                                                              multianimal=multianimal, copy_videos=True)
         else:
             self.config_file = deeplabcut.create_new_project(project_name, 'Dataset-Unity-' + current_time,
                                                               [pathlib.Path("video.mp4").absolute()],
                                                               working_directory=str(
                                                                   pathlib.Path().absolute()) + "/projects",
-                                                              multianimal=False, copy_videos=True)
+                                                              multianimal=multianimal, copy_videos=True)
     def create_dataset(self):
         # opens and loads the config file for deeplabcut
+        picture_list = []
         print(pathlib.Path(self.config_file))
         file = open(pathlib.Path(self.config_file), "r")
         cfg = YAML().load(file)
@@ -116,7 +123,7 @@ class CreateTrainingDataset:
             image_data += "\n"
         # save image_data
-        self.picture_list.append(image_data)
+        picture_list.append(image_data)
         # copy image to the training folder
         copy("trainData/" + image_name,
              cfg["project_path"] + "/labeled-data/video")
@@ -132,7 +139,7 @@ class CreateTrainingDataset:
         # store training data
         file.writelines(
-            [self.scorer_list, "\n", self.bodyparts_list, "\n", self.coords_list, "\n", ] + self.picture_list)
+            [self.scorer_list, "\n", self.bodyparts_list, "\n", self.coords_list, "\n", ] + picture_list)
         file.close()
         # edit config for new bodyparts and save
@@ -144,6 +151,113 @@ class CreateTrainingDataset:
         # convert training data file to h5
         deeplabcut.convertcsv2h5(self.config_file, False)
+    def create_multianimal_dataset(self):
+        # opens and loads the config file for deeplabcut
+        picture_list = {}
+        print(pathlib.Path(self.config_file))
+        file = open(pathlib.Path(self.config_file), "r")
+        cfg = YAML().load(file)
+        file = open(pathlib.Path("objectData/" + self.scan_object_name + ".json"), "r")
+        data = json.load(file)
+        for keypoint in data["keypoints"]:
+            self.bodyparts.append(keypoint["name"])
+        for bone in data["skeleton"]:
+            self.skeleton.append((bone["keypoint1"], bone["keypoint2"]))
+        index_most_individuals = 0
+        count_individuals = 0
+        for i, data_name in enumerate(os.listdir("trainData")):
+            # skip image files; only the JSON data files are processed here
+            if "image" in data_name:
+                continue
+            # open data and image file
+            file = open(pathlib.Path("trainData/" + data_name), "r")
+            data = json.load(file)
+            count = 0
+            for item in data["Items"]:
+                if self.scan_object_name in item["name"]: