commit 21ad625896
Author: wxchen
Date: 2022-12-29 23:08:25 +08:00
42 changed files with 2336 additions and 0 deletions


@@ -0,0 +1,97 @@
"""
combine_A_and_B.py
Preprocessing for pix2pix
Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
All rights reserved.
BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
"""
import argparse
import os
from multiprocessing import Pool
import cv2
import numpy as np
def image_write(path_A, path_B, path_AB):
    # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
    im_A = cv2.imread(path_A, 1)
    im_B = cv2.imread(path_B, 1)
    # im_A = cv2.transpose(im_A)
    # im_B = cv2.transpose(im_B)
    im_AB = np.concatenate([im_A, im_B], 1)
    cv2.imwrite(path_AB, im_AB)


parser = argparse.ArgumentParser("create image pairs")
parser.add_argument("--fold_A", dest="fold_A", help="input directory for image A", type=str, default="../dataset/50kshoes_edges")
parser.add_argument("--fold_B", dest="fold_B", help="input directory for image B", type=str, default="../dataset/50kshoes_jpg")
parser.add_argument("--fold_AB", dest="fold_AB", help="output directory", type=str, default="../dataset/test_AB")
parser.add_argument("--num_imgs", dest="num_imgs", help="number of images", type=int, default=1000000)
parser.add_argument("--use_AB", dest="use_AB", help="if true: (0001_A, 0001_B) to (0001_AB)", action="store_true")
parser.add_argument("--no_multiprocessing", dest="no_multiprocessing", help="if used, chooses single CPU execution instead of parallel execution", action="store_true", default=False)
args = parser.parse_args()

for arg in vars(args):
    print("[%s] = " % arg, getattr(args, arg))

splits = os.listdir(args.fold_A)

if not args.no_multiprocessing:
    pool = Pool()

for sp in splits:
    img_fold_A = os.path.join(args.fold_A, sp)
    img_fold_B = os.path.join(args.fold_B, sp)
    img_list = os.listdir(img_fold_A)
    if args.use_AB:
        img_list = [img_path for img_path in img_list if "_A." in img_path]

    num_imgs = min(args.num_imgs, len(img_list))
    print("split = %s, use %d/%d images" % (sp, num_imgs, len(img_list)))
    img_fold_AB = os.path.join(args.fold_AB, sp)
    if not os.path.isdir(img_fold_AB):
        os.makedirs(img_fold_AB)
    print("split = %s, number of images = %d" % (sp, num_imgs))
    for n in range(num_imgs):
        name_A = img_list[n]
        path_A = os.path.join(img_fold_A, name_A)
        if args.use_AB:
            name_B = name_A.replace("_A.", "_B.")
        else:
            name_B = name_A
        path_B = os.path.join(img_fold_B, name_B)
        if os.path.isfile(path_A) and os.path.isfile(path_B):
            name_AB = name_A
            if args.use_AB:
                name_AB = name_AB.replace("_A.", ".")  # remove _A
            path_AB = os.path.join(img_fold_AB, name_AB)
            if not args.no_multiprocessing:
                pool.apply_async(image_write, args=(path_A, path_B, path_AB))
            else:
                image_write(path_A, path_B, path_AB)

if not args.no_multiprocessing:
    pool.close()
    pool.join()
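# Usage sketch (illustrative, not part of the original script). Assuming
# fold_A and fold_B each contain matching train/val/test subfolders with
# identically named images, one might run:
#
#   python combine_A_and_B.py --fold_A ./A --fold_B ./B --fold_AB ./AB
#
# Each output image in fold_AB is then the horizontal concatenation [A | B].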


@@ -0,0 +1,109 @@
import glob
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.model_selection import train_test_split
def create_pixel_csv(img_dir: str, save_dir: str, img_type: str):
    """
    Creates a CSV file with the pixel coordinates of the image
    :param img_dir: image directory containing the images
    :param save_dir: directory for saving the CSV file per image
    :param img_type: "color" or "normal"
    :return: saves one CSV file per image
    """
    imgs = sorted(glob.glob(f"{img_dir}/*.png"))
    print(f"Found {len(imgs)} {img_type} images")
    for idx, img_path in enumerate(imgs):
        img = Image.open(img_path)
        img_np = np.array(img)
        # np.where over an always-true mask enumerates every (row, col) pair;
        # np.flip swaps the columns so each row becomes (x, y)
        xy_coords = np.flip(
            np.column_stack(np.where(np.all(img_np >= 0, axis=2))), axis=1
        )
        rgb = np.reshape(img_np, (np.prod(img_np.shape[:2]), 3))
        # Add pixel numbers in front
        pixel_numbers = np.expand_dims(np.arange(1, xy_coords.shape[0] + 1), axis=1)
        value = np.hstack([pixel_numbers, xy_coords, rgb])
        # Properly save as CSV
        if img_type == "color":
            df = pd.DataFrame(value, columns=["pixel_number", "X", "Y", "R", "G", "B"])
        elif img_type == "normal":
            df = pd.DataFrame(value, columns=["pixel_number", "X", "Y", "Nx", "Ny", "Nz"])
        else:
            raise ValueError(f"unknown img_type: {img_type}")
        del df["pixel_number"]
        df.to_csv(f"{save_dir}/image_{idx}.csv", sep=",", index=False)
        print(f"{img_type} image CSV written for image {idx}")
def combine_csv(csv_dir: str, img_type: str):
    """
    Combines all the CSV files in the directory into one CSV file for later use in training
    :param csv_dir: directory containing the CSV files
    :param img_type: "color" or "normal"
    """
    csv_files = glob.glob(f"{csv_dir}/*.csv")
    print(f"Found {len(csv_files)} {img_type} CSV files")
    df = pd.concat([pd.read_csv(f, sep=",") for f in csv_files])
    df.to_csv(f"{csv_dir}/combined.csv", sep=",", index=False)
    print(f"Combined CSV written for {img_type} images")
    check_nans(f"{csv_dir}/combined.csv")
    print("----------------------------------------------------")
def create_train_test_csv(save_dir: str, normal_path: str, color_path: str):
    """
    Merges the combined color and normal CSVs, samples 4% of the zeros,
    and splits the cleaned dataset into train and test sets (80%/20%)
    :param save_dir: path for the train_test_split folder
    :param normal_path: path to the combined normal CSV file
    :param color_path: path to the combined color CSV file
    """
    seed = 42
    rgb_df = pd.read_csv(color_path, sep=",")
    normal_df = pd.read_csv(normal_path, sep=",")
    rgb_df["Nx"] = normal_df["Nx"]
    rgb_df["Ny"] = normal_df["Ny"]
    rgb_df["Nz"] = normal_df["Nz"]
    # "zeros" are background pixels whose normal is the neutral (127, 127, 127)
    zeros_mask = (rgb_df["Nx"] == 127) & (rgb_df["Ny"] == 127) & (rgb_df["Nz"] == 127)
    zeros_df = rgb_df[zeros_mask]
    print(f"Found {len(zeros_df)} zeros in the dataset")
    non_zeros_df = rgb_df[~zeros_mask]
    print(f"Found {len(non_zeros_df)} non-zeros in the dataset")
    zeros_df.to_csv(f"{save_dir}/zeros.csv", sep=",", index=False)
    non_zeros_df.to_csv(f"{save_dir}/non_zeros.csv", sep=",", index=False)
    print("----------------------------------------------------")
    # sample 4% of the zeros so some background remains in the training data
    clean_df = zeros_df.sample(frac=0.04, random_state=seed)
    clean_df = pd.concat([clean_df, non_zeros_df])
    clean_df.to_csv(f"{save_dir}/clean-data.csv", sep=",", index=False)
    print(f"Clean data CSV of {len(clean_df)} rows written")
    # split into train and test
    train_df, test_df = train_test_split(clean_df, test_size=0.2, random_state=seed)
    train_df.to_csv(f"{save_dir}/train.csv", sep=",", index=False)
    test_df.to_csv(f"{save_dir}/test.csv", sep=",", index=False)
    print(f"Train set of size {len(train_df)} and test set of size {len(test_df)} written")
def check_nans(csv_path: str):
    """
    Checks for NaNs in the CSV file and replaces any found with the column mean
    :param csv_path: path to the CSV file
    """
    df = pd.read_csv(csv_path, sep=",")
    if df.isnull().values.any():
        nans = df.isnull().sum().sum()
        print(f"Found {nans} NaNs in {csv_path}")
        print("Replacing NaNs with column means")
        df.fillna(df.mean(), inplace=True)
        df.to_csv(csv_path, sep=",", index=False)
        print(f"NaNs replaced in {csv_path}")
    else:
        print("No NaNs found. Perfect!")


@@ -0,0 +1,22 @@
"""
Data loader for the color-normal datasets
"""
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from digit_depth.dataio.digit_dataset import DigitRealImageAnnotDataset
def data_loader(dir_dataset, params):
    """A data loader for the color-normal datasets
    Args:
        dir_dataset: path to the dataset
        params: a config object with annot_file, annot_flag, batch_size,
            shuffle, and num_workers attributes
    Returns:
        a (dataloader, dataset) tuple
    """
    transform = transforms.Compose([transforms.ToTensor()])
    dataset = DigitRealImageAnnotDataset(
        dir_dataset=dir_dataset,
        annot_file=params.annot_file,
        transform=transform,
        annot_flag=params.annot_flag,
    )
    dataloader = DataLoader(
        dataset,
        batch_size=params.batch_size,
        shuffle=params.shuffle,
        num_workers=params.num_workers,
    )
    return dataloader, dataset
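# Usage sketch (illustrative; `params` would typically come from a config
# file, SimpleNamespace is just a stand-in with the same attributes):
#
#   from types import SimpleNamespace
#   params = SimpleNamespace(annot_file="annotations.csv", annot_flag=True,
#                            batch_size=8, shuffle=True, num_workers=4)
#   loader, dataset = data_loader("data/color", params)
#   for img, annot in loader:
#       ...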


@@ -0,0 +1,43 @@
import glob
import pandas as pd
import torch
from PIL import Image
from torch.utils.data import Dataset
class DigitRealImageAnnotDataset(Dataset):
    def __init__(self, dir_dataset, annot_file, transform=None, annot_flag=True, img_type="png"):
        self.dir_dataset = dir_dataset
        print(f"Loading dataset from {dir_dataset}")
        self.transform = transform
        self.annot_flag = annot_flag
        # a sorted list of image paths; dir_dataset is the root dir of the dataset (color)
        self.img_files = sorted(glob.glob(f"{self.dir_dataset}/*.{img_type}"))
        print(f"Found {len(self.img_files)} images")
        if self.annot_flag:
            self.annot_dataframe = pd.read_csv(annot_file, sep=",")

    def __getitem__(self, idx):
        """Returns the image, or an (img, annot) tuple where annot has shape (num_matches, 3)"""
        # read in image
        img = Image.open(self.img_files[idx])
        img = self.transform(img)
        img = img.permute(0, 2, 1)  # (3,240,320) -> (3,320,240)
        # read in region annotations
        if self.annot_flag:
            img_name = self.img_files[idx]
            row_filter = self.annot_dataframe["img_names"] == img_name
            region_attr = self.annot_dataframe.loc[
                row_filter, ["center_x", "center_y", "radius"]
            ]
            annot = (
                torch.tensor(region_attr.values, dtype=torch.int32)
                if len(region_attr) > 0
                else torch.tensor([])
            )
        data = img
        if self.annot_flag:
            data = (img, annot)
        return data

    def __len__(self):
        return len(self.img_files)
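# Usage sketch (assumed file layout; the annotation CSV must provide the
# img_names, center_x, center_y, and radius columns referenced above):
#
#   import torchvision.transforms as T
#   dataset = DigitRealImageAnnotDataset(
#       dir_dataset="data/color",
#       annot_file="data/annotations.csv",
#       transform=T.Compose([T.ToTensor()]),
#   )
#   img, annot = dataset[0]  # img: (3, 320, 240) tensor; annot: (num_matches, 3)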


@@ -0,0 +1,47 @@
"""
Script for generating sphere ground truth normal images.
"""
import math
import numpy as np
def generate_sphere_gt_normals(img_mask, center_x, center_y, radius):
    """
    Generates a sphere ground truth normal image for an image.
    Args:
        img_mask: a numpy array of shape [H, W, 3]
        center_x: x coordinate of the center of the sphere
        center_y: y coordinate of the center of the sphere
        radius: the radius of the sphere
    Returns:
        img_normal: a numpy array of shape [H, W, 3]
    """
    img_normal = np.zeros(img_mask.shape, dtype="float64")
    for y in range(img_mask.shape[0]):
        for x in range(img_mask.shape[1]):
            # background default: normal pointing straight out of the image
            img_normal[y, x, 0] = 0.0
            img_normal[y, x, 1] = 0.0
            img_normal[y, x, 2] = 1.0
            if np.sum(img_mask[y, x, :]) > 0:
                # assumes every masked pixel lies within `radius` of the
                # center; otherwise math.acos raises a domain error
                dist = np.sqrt((x - center_x) ** 2 + (y - center_y) ** 2)
                ang_xz = math.acos(dist / radius)
                ang_xy = math.atan2(y - center_y, x - center_x)
                nx = math.cos(ang_xz) * math.cos(ang_xy)
                ny = math.cos(ang_xz) * math.sin(ang_xy)
                nz = math.sin(ang_xz)
                img_normal[y, x, 0] = nx
                img_normal[y, x, 1] = -ny  # image y axis points down
                img_normal[y, x, 2] = nz
                norm_val = np.linalg.norm(img_normal[y, x, :])
                img_normal[y, x, :] = img_normal[y, x, :] / norm_val
    # img_normal is in [-1., 1.]; convert to [0., 1.]
    img_normal = (img_normal + 1.0) * 0.5
    return img_normal
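# Usage sketch (hypothetical values; in practice the circle parameters would
# come from the per-image annotations):
#
#   mask = np.zeros((240, 320, 3), dtype=np.uint8)
#   yy, xx = np.ogrid[:240, :320]
#   mask[(xx - 160) ** 2 + (yy - 120) ** 2 <= 30 ** 2] = 255
#   gt = generate_sphere_gt_normals(mask, center_x=160, center_y=120, radius=30)
#   # gt is in [0, 1]; multiply by 255 to save as an 8-bit normal map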