diff --git a/NDF_combine.py b/NDF_combine.py new file mode 100644 index 0000000..190f862 --- /dev/null +++ b/NDF_combine.py @@ -0,0 +1,100 @@ +import models.local_model as model +from models.data import dataloader_garments, voxelized_data_shapenet + +from models import generation +import torch +from torch.nn import functional as F + + +def rot_YZ(points): + points_rot = points.copy() + points_rot[:, 1], points_rot[:, 2] = points[:, 2], points[:, 1] + return points_rot + +def to_grid(points): + grid_points = points.copy() + grid_points[:, 0], grid_points[:, 2] = points[:, 2], points[:, 0] + + return 2 * grid_points + +def from_grid(grid_points): + points = grid_points.copy() + points[:, 0], points[:, 2] = grid_points[:, 2], grid_points[:, 0] + + return 0.5 * points + +# 'test', 'val', 'train' +def loadNDF(index, pointcloud_samples, exp_name, data_dir, split_file, sample_distribution, sample_sigmas, res, mode = 'test'): + + global encoding + global net + global device + + net = model.NDF() + + device = torch.device("cuda") + + + if 'garments' in exp_name.lower() : + + dataset = dataloader_garments.VoxelizedDataset(mode = mode, data_path = data_dir, split_file = split_file, + res = res, density =0, pointcloud_samples = pointcloud_samples, + sample_distribution=sample_distribution, + sample_sigmas=sample_sigmas, + ) + + + + checkpoint = 'checkpoint_127h:6m:33s_457593.9149734974' + + generator = generation.Generator(net,exp_name, checkpoint = checkpoint, device = device) + + if 'cars' in exp_name.lower() : + + dataset = voxelized_data_shapenet.VoxelizedDataset( mode = mode, res = res, pointcloud_samples = pointcloud_samples, + data_path = data_dir, split_file = split_file, + sample_distribution = sample_distribution, sample_sigmas = sample_sigmas, + batch_size = 1, num_sample_points = 1024, num_workers = 1 + ) + + + + checkpoint = 'checkpoint_108h:5m:50s_389150.3971107006' + + generator = generation.Generator(net, exp_name, checkpoint=checkpoint, device=device) + + + 
example = dataset[index] + + print('Object: ',example['path']) + inputs = torch.from_numpy(example['inputs']).unsqueeze(0).to(device) # lead inputs and samples including one batch channel + + for param in net.parameters(): + param.requires_grad = False + + encoding = net.encoder(inputs) + + + +def predictRotNDF(points): + + points = rot_YZ(points) + points = to_grid(points) + points = torch.from_numpy(points).unsqueeze(0).float().to(device) + return torch.clamp(net.decoder(points,*encoding), max=0.1).squeeze(0).cpu().numpy() + + +def predictRotGradientNDF(points): + points = rot_YZ(points) + points = to_grid(points) + points = torch.from_numpy(points).unsqueeze(0).float().to(device) + points.requires_grad = True + + df_pred = torch.clamp(net.decoder(points,*encoding), max=0.1) + + df_pred.sum().backward() + + gradient = F.normalize(points.grad, dim=2)[0].detach().cpu().numpy() + + df_pred = df_pred.detach().squeeze(0).cpu().numpy() + return df_pred, rot_YZ( 2 * from_grid(gradient)) diff --git a/README.md b/README.md index 3369c90..b5dee00 100644 --- a/README.md +++ b/README.md @@ -78,6 +78,36 @@ but replacing `configs/shapenet_cars.txt` in the commands with the desired confi > with execution of this command. Y needs to be an integer between 0 to X-1, including O and X-1. In case you have SLURM > available you can use `slurm_scripts/run_preprocessing.sh` +## Downloading Garments and Scenes + +To run the garment processing run + +``` +cd dataprocessing +chmod u+x garment_process.sh +source garment_process.sh +``` + +## Scenes +Download gibson dataset from [here](https://docs.google.com/forms/d/e/1FAIpQLScWlx5Z1DM1M-wTSXaa6zV8lTFkPmTHW1LqMsoCBDWsTDjBkQ/viewform) +Specifically we used this version: +All scenes, 572 scenes (108GB): gibson_v2_all.tar.gz + +Then run +``` +python scene_process.py --input_path --output_path --sigmas 0.01 0.04 0.16 --res 256 --density 0.001708246 + +``` + +This also creates a split file of scenes to be later used for training. 
This is stored in ./datasets alongside the garments split file + + +``` +./datasets/split_scenes.npz +./datasets/split_garments.npz +``` + + ## Training and generation To train NDF use ``` @@ -98,9 +128,27 @@ python generate.py --config configs/shapenet_cars.txt Again, replacing `configs/shapenet_cars.txt` in the above commands with the desired configuration and `EXP_NAME` with the experiment name defined in the configuration. +## Rendering + +To render garments, run + +``` +python renderer.py --config configs/garments.txt +``` + +To render cars, run + +``` +python renderer.py --config configs/shapenet_cars_pretrained.txt +``` + +To render from different perspectives, change the `cam_position` and `cam_orientation` variables in the config files + ## Contact -For questions and comments please contact [Julian Chibane](http://virtualhumans.mpi-inf.mpg.de/people/Chibane.html) via mail. +For questions and comments about the training and generation, please contact [Julian Chibane](http://virtualhumans.mpi-inf.mpg.de/people/Chibane.html) via mail. + +For questions and comments about the rendering code, please contact [Aymen Mir](http://virtualhumans.mpi-inf.mpg.de/people/Mir.html) via mail. 
## License Copyright (c) 2020 Julian Chibane, Max-Planck-Gesellschaft diff --git a/configs/config_loader.py b/configs/config_loader.py index 278ecbb..b3ed245 100755 --- a/configs/config_loader.py +++ b/configs/config_loader.py @@ -3,6 +3,9 @@ import os +def str2bool(inp): + return inp.lower() == 'true' + def config_parser(): parser = configargparse.ArgumentParser() @@ -95,6 +98,36 @@ def config_parser(): help='Optimizer used during training.') + + ## Rendering arguments + parser.add_argument("--pc_samples", type=int, help='input pointcloud size') + parser.add_argument("--index", type=int, help='index to be rendered') + + ### + parser.add_argument("--size", type=int, help="the size of image", default=512) + parser.add_argument("--max_depth", type=float, help="the max depth of projected rays", default=2) + parser.add_argument("--alpha", type=float, help="the value by which the stepping distance should be multiplied", + default=0.6) + parser.add_argument("--step_back", type=float, default=0.005, help="the value by which we step back after stopping criteria met") + parser.add_argument("--epsilon", type=float, default=0.0026, help="epsilon ball - stopping criteria") + parser.add_argument("--screen_bound", type=float, default=0.4) + parser.add_argument("--screen_depth", type=float, default=-1) + + parser.add_argument('--cam_position', nargs='+', type=float, help='3D position of camera', default=[0, 0, -1]) + parser.add_argument('--light_position', nargs='+', type=float, help='3D position of light source', + default=[-1, -1, -1]) + parser.add_argument("--cam_orientation", nargs='+', type=float, + help="Camera Orientation in xyz euler angles (degrees)", default=[180.0, 0.0, -180.0]) + + parser.add_argument("--folder", type=str, default='./save', + help="location where images are to be saved") + parser.add_argument("--shade", type=str2bool, default=True, help="whether to save shade image") + parser.add_argument("--depth", type=str2bool, default=True, help="whether to save 
depth image") + parser.add_argument("--normal", type=str2bool, default=True, help="whether to save normal image") + + parser.add_argument("--debug_mode", type=str2bool, default=True, + help="to visualize everything in debug mode or not") + return parser diff --git a/configs/garments.txt b/configs/garments.txt new file mode 100644 index 0000000..d3d6423 --- /dev/null +++ b/configs/garments.txt @@ -0,0 +1,12 @@ +exp_name = garments_pretrained +data_dir = datasets/garments_data/ +split_file = datasets/split_garments.npz +sample_std_dev = [0.08, 0.02, 0.01] +sample_ratio = [0.02, 0.48, 0.50] + +input_res = 256 +pc_samples = 3000 +index = 10 +num_points=1000 +cam_position=[0, 1, 0] +cam_orientation=[90.0, 0.0, 180.0] \ No newline at end of file diff --git a/configs/shapenet_cars.txt b/configs/shapenet_cars.txt index d4759b1..69b0f98 100644 --- a/configs/shapenet_cars.txt +++ b/configs/shapenet_cars.txt @@ -4,3 +4,10 @@ split_file = datasets/shapenet/data/split_cars.npz input_data_glob = /*/model.obj sample_std_dev = [0.08, 0.02, 0.003] sample_ratio = [0.01, 0.49, 0.5] + +input_res = 256 +pc_samples = 3000 +index = 1 +num_points=1000 +cam_position=[0, 1, 0] +cam_orientation=[90.0, 0.0, 180.0] \ No newline at end of file diff --git a/configs/shapenet_cars_pretrained.txt b/configs/shapenet_cars_pretrained.txt index 5d05dfa..fdcfef4 100644 --- a/configs/shapenet_cars_pretrained.txt +++ b/configs/shapenet_cars_pretrained.txt @@ -4,3 +4,10 @@ split_file = datasets/shapenet/data/split_cars.npz input_data_glob = /*/model.obj sample_std_dev = [0.08, 0.02, 0.003] sample_ratio = [0.01, 0.49, 0.5] + +input_res = 256 +pc_samples = 3000 +index = 1 +num_points=1000 +cam_position=[0, 1, 0] +cam_orientation=[90.0, 0.0, 180.0] \ No newline at end of file diff --git a/dataprocessing/garment_normalize.py b/dataprocessing/garment_normalize.py new file mode 100644 index 0000000..bb4c221 --- /dev/null +++ b/dataprocessing/garment_normalize.py @@ -0,0 +1,41 @@ +import os +import numpy as np 
+import trimesh +import argparse + +def get_dirs_paths(d): + paths = [os.path.join(d, o) for o in os.listdir(d) if os.path.isdir(os.path.join(d, o))] + dirs = [ o for o in os.listdir(d) if os.path.isdir(os.path.join(d, o))] + return sorted(dirs), sorted(paths) + + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_folder", type = str) + parser.add_argument("--output_folder", type = str) + args = parser.parse_args() + + lists = ['TShirtNoCoat.obj', 'ShortPants.obj', 'Pants.obj', 'ShirtNoCoat.obj', 'LongCoat.obj'] + + all_dirs, all_paths = get_dirs_paths(args.input_folder) + for index in range(len(all_paths)): + path = all_paths[index] + for file in os.listdir(path): + if file in lists: + class_name = file.replace('.obj', '') + mesh_path = os.path.join(path, file) + out_dir = os.path.join(args.output_folder, class_name + '_' + all_dirs[index]) + if not os.path.isdir(out_dir): + os.makedirs(out_dir) + + out_file = os.path.join(out_dir, 'mesh.off') + + mesh = trimesh.load(mesh_path) + + new_verts = mesh.vertices - np.mean(mesh.vertices, axis = 0) + new_verts_sc = new_verts / 0.9748783846 + new_verts_sc = new_verts_sc * 0.5 + new_mesh = trimesh.Trimesh(vertices = new_verts_sc, faces = mesh.faces) + new_mesh.export(out_file) + print("Processed {} {}".format(all_dirs[index], class_name)) diff --git a/dataprocessing/garment_preprocess.py b/dataprocessing/garment_preprocess.py new file mode 100644 index 0000000..c1e2964 --- /dev/null +++ b/dataprocessing/garment_preprocess.py @@ -0,0 +1,115 @@ +from scipy.spatial import cKDTree as KDTree +import numpy as np +import trimesh +from glob import glob +import os +import multiprocessing as mp +from multiprocessing import Pool +import argparse +import random +import traceback +from functools import partial +import pymesh + + +def create_grid_points_from_bounds(minimun, maximum, res): + x = np.linspace(minimun, maximum, res) + X, Y, Z = np.meshgrid(x, x, x, indexing='ij') + X = 
X.reshape((np.prod(X.shape),)) + Y = Y.reshape((np.prod(Y.shape),)) + Z = Z.reshape((np.prod(Z.shape),)) + + points_list = np.column_stack((X, Y, Z)) + del X, Y, Z, x + return points_list + + +def voxelized_pointcloud_boundary_sampling(path, sigmas, res, inp_points, sample_points): + try: + out_voxelization_file = path + '/voxelized_point_cloud_{}res_{}points.npz'.format(res, inp_points) + + off_path = path + '/mesh.off' + mesh = trimesh.load(off_path) + py_mesh = pymesh.load_mesh(off_path) + + # ===================== + # Voxelized point cloud + # ===================== + + if not os.path.exists(out_voxelization_file): + + bb_min = -0.5 + bb_max = 0.5 + + point_cloud = mesh.sample(inp_points) + + # Grid Points used for computing occupancies + grid_points = create_grid_points_from_bounds(bb_min, bb_max, args.res) + + # KDTree creation for fast querying nearest neighbour to points on the point cloud + kdtree = KDTree(grid_points) + _, idx = kdtree.query(point_cloud) + + occupancies = np.zeros(len(grid_points), dtype=np.int8) + occupancies[idx] = 1 + compressed_occupancies = np.packbits(occupancies) + + np.savez(out_voxelization_file, point_cloud=point_cloud, compressed_occupancies = compressed_occupancies, + bb_min = bb_min, bb_max = bb_max, res = res) + print('Finished Voxelized point cloud {}'.format(path)) + + # ================== + # Boundary Sampling + # ================== + for sigma in sigmas: + out_sampling_file = path + '/pymesh_boundary_{}_samples.npz'.format( sigma) + + if not os.path.exists(out_sampling_file): + points = mesh.sample(sample_points) + if sigma == 0: + boundary_points = points + else: + boundary_points = points + sigma * np.random.randn(sample_points, 3) + + # Transform the boundary points to grid coordinates + grid_coords = boundary_points.copy() + grid_coords[:, 0], grid_coords[:, 2] = boundary_points[:, 2], boundary_points[:, 0] + + grid_coords = 2 * grid_coords + + # distance field calculation + if sigma == 0: + df = 
np.zeros(boundary_points.shape[0]) + else: + df = np.sqrt(pymesh.distance_to_mesh(py_mesh, boundary_points)[0]) + np.savez(out_sampling_file, points=boundary_points, df=df, grid_coords=grid_coords) + + print('Finished boundary sampling {}'.format(out_sampling_file)) + + except Exception as err: + print('Error with {}: {}'.format(path, traceback.format_exc())) + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Run boundary sampling' + ) + + parser.add_argument("--input_path", type = str) + parser.add_argument("--sigmas", nargs = '+', type = float) + parser.add_argument("--res", type = int) + parser.add_argument("--inp_points", type = int) + parser.add_argument("--sample_points", type = int) + + args = parser.parse_args() + + + paths = glob(args.input_path + '/*/') + + #To run the script multiple times in parallel: shuffling the data + random.shuffle(paths) + print(mp.cpu_count()) + p = Pool(mp.cpu_count()) + p.map(partial(voxelized_pointcloud_boundary_sampling, sigmas=args.sigmas, res = args.res, + inp_points = args.inp_points, sample_points = args.sample_points), paths) + p.close() + p.join() diff --git a/dataprocessing/garment_process.sh b/dataprocessing/garment_process.sh new file mode 100755 index 0000000..1f659d6 --- /dev/null +++ b/dataprocessing/garment_process.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +wget https://datasets.d2.mpi-inf.mpg.de/MultiGarmentNetwork/Multi-Garmentdataset.zip +wget https://datasets.d2.mpi-inf.mpg.de/MultiGarmentNetwork/Multi-Garmentdataset_02.zip + +unzip Multi-Garmentdataset.zip +unzip Multi-Garmentdataset_02.zip + +rm Multi-Garmentdataset.zip +rm Multi-Garmentdataset_02.zip + +mkdir "../datasets/garments_data" +conda activate pymesh + +python garment_normalize.py --input_folder "./Multi-Garment_dataset" --output_folder "../datasets/garments_data" +python garment_normalize.py --input_folder "./Multi-Garment_dataset_02" --output_folder "../datasets/garments_data" + +python garment_preprocess.py 
--input_path "../datasets/garments_data" --res 256 --inp_points 3000 --sample_points 100000 --sigmas 0.01 0.02 0.08 +python garment_preprocess.py --input_path "../datasets/garments_data" --res 256 --inp_points 300 --sample_points 100000 --sigmas 0.01 0.02 0.08 + diff --git a/dataprocessing/scene_process.py b/dataprocessing/scene_process.py new file mode 100644 index 0000000..1b55627 --- /dev/null +++ b/dataprocessing/scene_process.py @@ -0,0 +1,265 @@ +import trimesh +import pymesh +import numpy as np + +import os +import traceback +from functools import partial +from scipy.spatial import cKDTree as KDTree +from PIL import Image + +Image.MAX_IMAGE_PIXELS = None +from glob import glob +import multiprocessing as mp +from multiprocessing import Pool +import argparse +import random + + +def find_verts(verts, minx, maxx, miny, maxy, minz, maxz): + """ + Finds the vertex indices between the specified points of a cuboid + :param verts: + :param minx: + :param maxx: + :param miny: + :param maxy: + :param minz: + :param maxz: + :return: + """ + verts_locs = np.where(verts[:, 0] <= maxx) + verts_loc2 = np.where(verts[:, 0] >= minx) + + verts_locs3 = np.where(verts[:,1] <= maxy) + verts_locs4 = np.where(verts[:,1] >= miny) + + verts_locs5 = np.where(verts[:,2] <= maxz) + verts_locs6 = np.where(verts[:,2] >= minz) + + ret_verts = list(set(verts_locs[0].tolist()) & (set(verts_loc2[0].tolist())) & + (set(verts_locs3[0].tolist())) & set(verts_locs4[0].tolist()) & + set(verts_locs5[0].tolist()) & (set(verts_locs6[0].tolist()))) + + return ret_verts + +def get_boxes(points, low, high): + """ + Splits a set of vertices into cubes + :param points: The input set of vertices + :param low: The lowest bound of the input mesh + :param high: The upper bound of the input mesh + :return: A dictionary indexed by the lower left corner of the cuboids + Every cube which has atleast one vertex is indexed in the dictionary + """ + GRID_sIZE = 2.5 + + delt = -0.1 + dict_ret = {} + xs = 
np.linspace(-53 + delt, 72 + delt, np.uint8(np.round((72+53)/ GRID_sIZE )) + 1) + ys = np.linspace(-51 + delt, 54 + delt, np.uint8(np.round((105 / GRID_sIZE))) + 1) + zs = np.linspace(-7.5 +delt, 12.5 + delt, np.uint8(np.round((20/ GRID_sIZE))) + 1) + + + index = 0 + for i in range(len(xs) - 1): + for j in range(len(ys) -1): + for k in range(len(zs) - 1): + index = index + 1 + x_min = xs[i] + x_max = xs[i+1] + + y_min = ys[j] + y_max = ys[j+1] + + z_min = zs[k] + z_max = zs[k + 1] + + if x_min >= high[0] or x_max <= low[0] or y_min >= high[1] or y_max <= low[1] or z_min >= high[2] or z_max <= low[2]: + continue + else: + verts_inds = find_verts(points, x_min, x_max, y_min, y_max, z_min, z_max) + if not len(verts_inds) == 0: + dict_ret[(x_min, y_min, z_min)] = verts_inds + + return dict_ret + + +def create_grid_points_from_bounds(min_x, max_x, min_y, max_y, min_z, max_z, res): + x = np.linspace(min_x, max_x, res) + y = np.linspace(min_y, max_y, res) + z = np.linspace(min_z, max_z, res) + X, Y, Z = np.meshgrid(x, y, z, indexing='ij', sparse=False) + X = X.reshape((np.prod(X.shape),)) + Y = Y.reshape((np.prod(Y.shape),)) + Z = Z.reshape((np.prod(Z.shape),)) + + points_list = np.column_stack((X, Y, Z)) + del X, Y, Z, x + return points_list + +def bd_sampl_vx_ptcld(input_path, output_path, sigmas, res, density): + """ + Creates a voxelized pointcloud and + :param input_path: + :param output_path: + :param sigmas: + :param res: + :param density: + :return: + """ + print('Start with: ', input_path) + try: + norm_path = os.path.normpath(input_path) + scan_name = norm_path.split(os.sep)[-1] + + obj_path = input_path + '/{}_mesh_texture.obj'.format(scan_name) + + # Check if some other process already working on this scene + out_query = output_path + '/{}/*/pymesh_boundary_{}_samples.npz'.format(scan_name, sigmas[0]) + if len(glob(out_query)) > 0: + print('Exists - skip!') + return + + mesh = trimesh.load(obj_path) + py_mesh = pymesh.load_mesh(obj_path) + + # Input Point 
Cloud + sample_num = int(mesh.area / density) + point_cloud = mesh.sample(sample_num) + + # Boundary sampling points + print('Random sample mesh') + sample_num_bd = 5 * sample_num + points = mesh.sample(sample_num_bd) + + boundary_points_list = [] + df_list = [] + # ============================== + # Distance Field Computation + # ============================== + for sigma in sigmas: + print('Distance computation sigma {}'.format(sigma)) + + boundary_points = points + sigma * np.random.randn(sample_num_bd, 3) + boundary_points_list.append(boundary_points) + df_list.append(np.sqrt(pymesh.distance_to_mesh(py_mesh, boundary_points)[0])) + + print('Split into chunks: ', input_path) + split_dict = get_boxes(point_cloud, *mesh.bounds) + for cube_corner in split_dict: + # Go over ever occupied scene voxel + print('Start voxelization: ', input_path) + + # =========================== + # Voxelized Point Cloud + # =========================== + out_cube_path = output_path + '/{}/{}/'.format(scan_name, cube_corner) + os.makedirs(out_cube_path, exist_ok=True) + out_file = out_cube_path + 'voxelized_point_cloud_{}res_{}density.npz'.format(res, density) + + min_x, min_y, min_z = cube_corner + verts_inds = split_dict[cube_corner] + voxel_point_cloud = point_cloud[verts_inds] + + grid_points = create_grid_points_from_bounds(min_x, min_x + 2.5, min_y, min_y + 2.5, min_z, min_z + 2.5, res) + occupancies = np.zeros(len(grid_points), dtype=np.int8) + kdtree = KDTree(grid_points) + _, idx = kdtree.query(voxel_point_cloud) + occupancies[idx] = 1 + + compressed_occupancies = np.packbits(occupancies) + + np.savez(out_file, point_cloud=voxel_point_cloud, compressed_occupancies=compressed_occupancies, res=res) + + # ===================================== + # Split Distance Field into Cubes + # ===================================== + print('Start corner df computation: ', input_path) + + for i, sigma in enumerate(sigmas): + + df = df_list[i] + boundary_points = boundary_points_list[i] + + 
verts_inds = find_verts(boundary_points, min_x, min_x + 2.5, min_y, min_y + 2.5, min_z, min_z + 2.5) + + if len(verts_inds) == 0: + continue + + cube_df = df[verts_inds] + cube_points = boundary_points[verts_inds] + + cube_points2 = cube_points[:] - cube_corner + grid_cube_points = cube_points2.copy() + grid_cube_points[:, 0], grid_cube_points[:, 2] = cube_points2[:, 2], cube_points2[:, 0] + grid_cube_points = grid_cube_points / 2.5 + grid_cube_points = 2 * grid_cube_points - 1 + + out_path = output_path + '/{}/{}/'.format(scan_name, cube_corner) + os.makedirs(out_path, exist_ok=True) + + np.savez(out_path + '/pymesh_boundary_{}_samples.npz'.format(sigma), points=cube_points, df = cube_df, grid_coords= grid_cube_points) + + print('Finished {}'.format(input_path)) + except: + print('Error with {}: {}'.format(input_path, traceback.format_exc())) + +def normalize_paths(base_path, paths, res, density, sigmas): + """ + Creates the final split file used for training + :param base_path: The location of the output files + :param paths: The names of all the scans + :return: + """ + new_paths = [] + for name in paths: + path = base_path + '/' + name + cubes_paths = glob(path + '/*/') + cubes_paths_normalized = [] + for path_iter in cubes_paths: + name_cube = base_path + '/' + name + '/' + os.path.normpath(path_iter).split(os.sep)[-1] + condition_inc = os.path.exists(os.path.join(name_cube, 'voxelized_point_cloud_{}res_{}density.npz'.format(res, density))) + + for sigma in sigmas: + condition_inc = condition_inc and os.path.exists(os.path.join(name_cube, 'pymesh_boundary_{}_samples.npz'.format(sigma))) + if condition_inc: + cubes_paths_normalized.append('/' + name + '/' + os.path.normpath(path_iter).split(os.sep)[-1]) + + new_paths = new_paths + cubes_paths_normalized + return new_paths +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + parser.add_argument('--res', type=int) + parser.add_argument("--input_path", type = str) + parser.add_argument("--output_path", type = str , 
help = "without hyphen at the end") + parser.add_argument("--sigmas", nargs = '+', type = float) + parser.add_argument("--density", type = float, default = 0.001708246) + parser.add_argument("--split_file", type = str, default = "../datasets/split_scenes_names.npz" ) + args = parser.parse_args() + + paths = glob(args.input_path + '/*/') + paths.sort() + + print(paths) + random.shuffle(paths) + + p = Pool(mp.cpu_count()) + p.map(partial(bd_sampl_vx_ptcld, sigmas=args.sigmas, res = args.res, density = args.density, output_path = args.output_path), paths) + p.close() + p.join() + + data = np.load(args.split_file) + modes = ['train', 'test', 'val'] + new_dict = {} + for mode in modes: + new_dict[mode] = normalize_paths(base_path=args.output_path, paths =data[mode]) + + np.savez('../datasets/split_scenes.npz', train = new_dict['train'], test = new_dict['test'], val = new_dict['val']) + + + + + + diff --git a/dataprocessing/voxelized_pointcloud_sampling.py b/dataprocessing/voxelized_pointcloud_sampling.py index 3ae598b..44e7317 100644 --- a/dataprocessing/voxelized_pointcloud_sampling.py +++ b/dataprocessing/voxelized_pointcloud_sampling.py @@ -13,13 +13,10 @@ def voxelized_pointcloud_sampling(path): input_file = os.path.join(out_path,file_name + '_scaled.off') out_file = out_path + '/voxelized_point_cloud_{}res_{}points.npz'.format(cfg.input_res, cfg.num_points) - if os.path.exists(out_file): print(f'Exists: {out_file}') return - - mesh = trimesh.load(input_file) point_cloud = mesh.sample(cfg.num_points) diff --git a/datasets/split_garments.npz b/datasets/split_garments.npz new file mode 100644 index 0000000..6763b98 Binary files /dev/null and b/datasets/split_garments.npz differ diff --git a/datasets/split_scenes_names.npz b/datasets/split_scenes_names.npz new file mode 100644 index 0000000..d2f53d6 Binary files /dev/null and b/datasets/split_scenes_names.npz differ diff --git 
a/experiments/garments_pretrained/checkpoints/checkpoint_127h:6m:33s_457593.9149734974.tar b/experiments/garments_pretrained/checkpoints/checkpoint_127h:6m:33s_457593.9149734974.tar new file mode 100644 index 0000000..af08294 Binary files /dev/null and b/experiments/garments_pretrained/checkpoints/checkpoint_127h:6m:33s_457593.9149734974.tar differ diff --git a/models/data/dataloader_garments.py b/models/data/dataloader_garments.py new file mode 100644 index 0000000..550ffd6 --- /dev/null +++ b/models/data/dataloader_garments.py @@ -0,0 +1,100 @@ +from __future__ import division +from torch.utils.data import Dataset +import os +import numpy as np +import torch +import traceback + + +class VoxelizedDataset(Dataset): + + + def __init__(self, mode, data_path, split_file, res = 256, density =0, + pointcloud_samples = 3000, batch_size = 1, num_sample_points = 1024, num_workers = 1, + sample_distribution = [1], sample_sigmas = [0.01], **kwargs): + """ + :param mode: train|test|val + :param data_path: path where data is stored + :param split_file: location of split file + :param res: resolution of input voxelized point cloud + :param density: Density used for generating input and boundary point clouds (one of density or pointcloud_samples must be specified) + :param pointcloud_samples: Number of samples used for generating input point cloud (one of density or pointcloud_samples must be specified) + :param batch_size: batch size + :param num_sample_points: total points used as input for the neural network + :param num_workers: Num workers used for training + :param sample_distribution: What fraction to use from boundary points generated using different sigmas + :param sample_sigmas: Sigmas used for boundary points generation + :param kwargs: + """ + self.sample_distribution = np.array(sample_distribution) + self.sample_sigmas = np.array(sample_sigmas) + + assert np.sum(self.sample_distribution) == 1 + assert np.any(self.sample_distribution < 0) == False + assert 
len(self.sample_distribution) == len(self.sample_sigmas) + + self.path = data_path + self.density = density + self.res = res + + self.data = np.load(split_file)[mode] + + self.num_sample_points = num_sample_points + self.batch_size = batch_size + self.num_workers = num_workers + self.pointcloud_samples = pointcloud_samples + + # compute number of samples per sampling method + self.num_samples = np.rint(self.sample_distribution * num_sample_points).astype(np.uint32) + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx): + path = self.path + self.data[idx] + + try: + if self.density: + voxel_path = path + '/voxelized_point_cloud_{}res_{}density.npz'.format(self.res, self.density) + if self.pointcloud_samples: + voxel_path = path + '/voxelized_point_cloud_{}res_{}points.npz'.format(self.res, self.pointcloud_samples) + + occupancies = np.unpackbits(np.load(voxel_path)['compressed_occupancies']) + input = np.reshape(occupancies, (self.res,)*3) + + points = [] + coords = [] + df = [] + + for i, num in enumerate(self.num_samples): + boundary_samples_path = path + '/{}boundary_{}_samples.npz'.format('pymesh_', self.sample_sigmas[i]) + boundary_samples_npz = np.load(boundary_samples_path) + boundary_sample_points = boundary_samples_npz['points'] + boundary_sample_coords = boundary_samples_npz['grid_coords'] + boundary_sample_df = boundary_samples_npz['df'] + subsample_indices = np.random.randint(0, len(boundary_sample_points), num) + points.extend(boundary_sample_points[subsample_indices]) + coords.extend(boundary_sample_coords[subsample_indices]) + df.extend(boundary_sample_df[subsample_indices]) + + assert len(points) == self.num_sample_points + assert len(df) == self.num_sample_points + assert len(coords) == self.num_sample_points + except: + print('Error with {}: {}'.format(path, traceback.format_exc())) + raise + + return {'grid_coords':np.array(coords, dtype=np.float32),'df': np.array(df, dtype=np.float32),'points':np.array(points, 
# conda activate pymesh
import math
import os

import numpy as np


def str2bool(inp):
    """Return True only for the (case-insensitive) string 'true'.

    Bug fix: the original ``inp.lower() in 'true'`` was a *substring*
    membership test, so '', 't', 'ru', etc. all evaluated to True.
    """
    return inp.lower() == 'true'


class Renderer():
    """Sphere-tracing renderer for Neural Distance Fields (NDF).

    One ray is cast per pixel of a square screen plane placed in front of a
    user-configured camera.  Each ray is marched through the clamped distance
    field predicted by the network until it gets within ``epsilon`` of the
    surface (or exceeds ``max_depth``); the field's gradients at the hit
    points then act as surface normals for shading.
    """

    def __init__(self):
        self.get_args()
        self.create_plane_points_from_bounds()
        self.define_screen_points()
        self.define_unit_rays()

    def get_args(self):
        """Load the render configuration and make sure the output folder exists."""
        # Deferred import: keeps this module importable without the project config.
        import configs.config_loader as cfg_loader

        self.args = cfg_loader.get_config()
        os.makedirs(self.args.folder, exist_ok=True)

    def create_plane_points_from_bounds(self):
        """Create the flat grid of points that acts as the rendering screen.

        The plane is an axis-aligned ``size`` x ``size`` grid in x/y at a
        fixed z (= ``screen_depth``); it is moved into world space later by
        the camera transform.
        """
        axis = np.linspace(-self.args.screen_bound, self.args.screen_bound, self.args.size)
        X, Y = np.meshgrid(axis, axis, indexing='ij')
        points_list = np.column_stack((X.ravel(), Y.ravel()))
        # Append the constant z coordinate as the third column.
        self.points_list = np.insert(points_list, 2, self.args.screen_depth, axis=1)

    def to_rotation_matrix(self):
        """Build ``self.rot_matrix`` (3x3) from the camera Euler angles.

        Angles are given in degrees and applied extrinsically in x, y, z order.
        """
        rx, ry, rz = (math.radians(a) for a in self.args.cam_orientation)
        R_x = np.array([[1, 0, 0],
                        [0, math.cos(rx), -math.sin(rx)],
                        [0, math.sin(rx), math.cos(rx)]])
        R_y = np.array([[math.cos(ry), 0, math.sin(ry)],
                        [0, 1, 0],
                        [-math.sin(ry), 0, math.cos(ry)]])
        R_z = np.array([[math.cos(rz), -math.sin(rz), 0],
                        [math.sin(rz), math.cos(rz), 0],
                        [0, 0, 1]])
        self.rot_matrix = R_z @ R_y @ R_x

    def to_transf_matrix(self):
        """Build the homogeneous 4x4 camera transform (rotation + translation)."""
        self.to_rotation_matrix()
        T = np.eye(4)
        T[:3, :3] = self.rot_matrix
        T[:3, 3] = np.reshape(self.args.cam_position, (3,))
        self.trans_mat = T

    def append_one(self, arr):
        """Return ``arr`` (N x D) with a column of ones appended (homogeneous coords)."""
        return np.concatenate((arr, np.ones((arr.shape[0], 1))), axis=1)

    def define_screen_points(self):
        """Move the screen plane and the camera origin into world space."""
        self.create_plane_points_from_bounds()
        self.to_transf_matrix()

        # Row 0 is the camera location (origin), the rest are screen pixels.
        screen_and_cam = np.vstack((np.zeros(3), self.points_list))
        hom = self.append_one(screen_and_cam)

        # Transform all points at once: (4x4) @ (4 x N), then back to N x 4.
        transformed = np.matmul(self.trans_mat, hom.T).T

        # Drop the homogeneous coordinate -> N x 3.
        self.screen_and_cam_transformed = transformed[:, :3]

        if self.args.debug_mode:
            import trimesh  # deferred: only needed for the debug point dump
            trimesh.Trimesh(vertices=self.screen_and_cam_transformed, faces=[]).export('setup_camera_rot.off')

    def define_unit_rays(self):
        """Compute the unit direction of the ray from the camera through each pixel."""
        points = self.screen_and_cam_transformed[1:, :]
        self.cam_trans = np.reshape(self.screen_and_cam_transformed[0, :], (1, 3))

        ray_vector = points - self.cam_trans
        norms = np.linalg.norm(ray_vector, ord=2, axis=1, keepdims=True)
        self.unit_rays = ray_vector / norms

    def get_lgth_rays(self):
        """Compute unit rays from each traced hit point towards the light source."""
        src_batch = np.repeat([self.args.light_position], self.args.size * self.args.size, axis=0)
        rays = src_batch - self.final_points
        norms = np.linalg.norm(rays, ord=2, axis=1, keepdims=True)
        self.ray_to_src = rays / norms

    def run(self):
        """Sphere-trace every ray through the NDF until it hits or exceeds max depth."""
        # Deferred import of the NDF glue module (pulls in torch + the trained net).
        import NDF_combine as NDF

        print(self.args)
        NDF.loadNDF(
            mode='test', index=self.args.index,
            pointcloud_samples=self.args.pc_samples,
            exp_name=self.args.exp_name, data_dir=self.args.data_dir,
            split_file=self.args.split_file, sample_distribution=self.args.sample_ratio,
            sample_sigmas=self.args.sample_std_dev, res=self.args.input_res,
        )

        n_rays = self.args.size * self.args.size
        depth = np.zeros((n_rays, 1))

        # Every ray starts at the camera position.
        points = np.repeat(self.cam_trans, n_rays, axis=0)
        ray = self.unit_rays.copy()
        step = 1
        indices_cont_all = list(range(n_rays))

        while len(indices_cont_all) > 0:
            print('Iter:', step)
            dists_points = NDF.predictRotNDF(points)
            dists_points = np.reshape(dists_points, (n_rays, 1))

            # A ray stops when it is within epsilon of the surface, or when it
            # has marched past the depth budget without hitting anything.
            indices_stop = np.where(dists_points < self.args.epsilon)[0]
            indices_stop2 = np.where(depth > self.args.max_depth)[0]
            indices_stop_all = list(set(indices_stop).union(set(indices_stop2)))

            # Zero the direction of stopped rays so they no longer advance.
            ray[indices_stop_all] = 0
            indices_cont_all = list(set(range(n_rays)).difference(set(indices_stop_all)))

            # Advance active rays by a damped (alpha) fraction of the predicted distance.
            depth[indices_cont_all] = depth[indices_cont_all] + self.args.alpha * dists_points[indices_cont_all]
            points = points + (ray * (self.args.alpha * dists_points))
            step = step + 1

        # Back off slightly along the original ray before evaluating gradients,
        # so the gradient is taken just outside the surface.
        points = points - (self.unit_rays * self.args.step_back)
        self.final_points = points.copy()

        ## NORMALS: clamp the depth map and use NDF gradients as surface normals.
        self.depth_np = depth.copy()
        self.depth_np[self.depth_np > self.args.max_depth] = self.args.max_depth

        dists, gradients = NDF.predictRotGradientNDF(points)
        self.final_gradients = gradients.copy()
        self.normals = np.reshape(gradients, (n_rays, 3))

    def save(self, image, name, size, normalize):
        """Write one rendered channel image to ``args.folder``.

        :param image: flat image as np array (size*size rows)
        :param name: name of the file to be stored
        :param size: number of channels (1 or 3)
        :param normalize: whether to map values from [-1, 1] into [0, 1] first
        """
        import cv2  # deferred: OpenCV is only needed when writing images

        if normalize:
            image = (image + 1) / 2
        image = np.reshape(image, (self.args.size, self.args.size, size))

        image = cv2.transpose(image)
        image = cv2.flip(image, 0)
        # NOTE(review): hard-coded crop — assumes args.size is at least ~610;
        # confirm against the configured render size.
        image = image[90:610, :]

        cv2.imwrite(os.path.join(self.args.folder, name), np.uint8(255 * image))

    def save_images(self):
        """Shade the traced points and write the requested output images."""
        # Shade against the viewing direction (Lambertian-style dot product).
        shade = np.sum(np.multiply(-self.unit_rays, self.normals), axis=1)
        shade = np.reshape(shade, (shade.shape[0], 1))

        # Rays that never hit anything render as white background.
        shade[self.depth_np == self.args.max_depth] = 1
        self.save(shade, 'shade.jpg', 1, True)

        # SHADE WITH LIGhT SOURCE
        if self.args.shade:
            self.get_lgth_rays()
            shd_lgth = np.sum(np.multiply(self.ray_to_src, self.normals), axis=1)
            shd_lgth = np.reshape(shd_lgth, (shd_lgth.shape[0], 1))
            shd_lgth[self.depth_np == self.args.max_depth] = 1
            self.save(shd_lgth, 'shade_src.jpg', 1, True)

        if self.args.normal:
            RGB_normals = self.final_gradients.copy()
            # Background pixels become white.  (The original wrote through
            # reshaped per-channel slices and relied on reshape returning a
            # view; masking whole rows is equivalent and has no such fragility.)
            background = (self.depth_np == self.args.max_depth)[:, 0]
            RGB_normals[background] = 1

            black_pixels_mask = np.all(RGB_normals == [0, 0, 0], axis=-1)
            RGB_normals[black_pixels_mask] = np.array([1, 1, 1])
            self.save(RGB_normals, 'normals.jpg', 3, True)

        if self.args.depth:
            depth_normalized = np.copy(self.depth_np / self.args.max_depth)
            self.save(depth_normalized, 'depth_final.jpg', 1, False)

if __name__ == "__main__":
    renderer = Renderer()
    renderer.run()
    renderer.save_images()